X86RegisterInfo.cpp revision 849f214a4e3676e41168b0c5398165c4d4fb99f8
//===- X86RegisterInfo.cpp - X86 Register Information -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the MRegisterInfo class. This
// file is responsible for the frame pointer elimination optimization on X86.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86RegisterInfo.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineLocation.h"
#include "llvm/Target/TargetAsmInfo.h"
#include "llvm/Target/TargetFrameInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
using namespace llvm;

namespace {
  cl::opt<bool>
  NoFusing("disable-spill-fusing",
           cl::desc("Disable fusing of spill code into instructions"));
  cl::opt<bool>
  PrintFailedFusing("print-failed-fuse-candidates",
                    cl::desc("Print instructions that the allocator wants to"
                             " fuse, but the X86 backend currently can't"),
                    cl::Hidden);
}
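
// Note (illustrative, not part of the original file): as cl::opt flags these
// are picked up by any tool that links in the X86 backend, so assuming a
// bitcode input foo.bc they could be exercised with something like:
//
//   llc -disable-spill-fusing foo.bc
//   llc -print-failed-fuse-candidates foo.bc   (hidden option)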

X86RegisterInfo::X86RegisterInfo(X86TargetMachine &tm,
                                 const TargetInstrInfo &tii)
  : X86GenRegisterInfo(X86::ADJCALLSTACKDOWN, X86::ADJCALLSTACKUP),
    TM(tm), TII(tii) {
  // Cache some information.
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  Is64Bit = Subtarget->is64Bit();
  if (Is64Bit) {
    SlotSize = 8;
    StackPtr = X86::RSP;
    FramePtr = X86::RBP;
  } else {
    SlotSize = 4;
    StackPtr = X86::ESP;
    FramePtr = X86::EBP;
  }
}

void X86RegisterInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator MI,
                                          unsigned SrcReg, int FrameIdx,
                                          const TargetRegisterClass *RC) const {
  unsigned Opc;
  if (RC == &X86::GR64RegClass) {
    Opc = X86::MOV64mr;
  } else if (RC == &X86::GR32RegClass) {
    Opc = X86::MOV32mr;
  } else if (RC == &X86::GR16RegClass) {
    Opc = X86::MOV16mr;
  } else if (RC == &X86::GR8RegClass) {
    Opc = X86::MOV8mr;
  } else if (RC == &X86::GR32_RegClass) {
    Opc = X86::MOV32_mr;
  } else if (RC == &X86::GR16_RegClass) {
    Opc = X86::MOV16_mr;
  } else if (RC == &X86::RFP64RegClass || RC == &X86::RSTRegClass) {
    Opc = X86::FpST64m;
  } else if (RC == &X86::RFP32RegClass) {
    Opc = X86::FpST32m;
  } else if (RC == &X86::FR32RegClass) {
    Opc = X86::MOVSSmr;
  } else if (RC == &X86::FR64RegClass) {
    Opc = X86::MOVSDmr;
  } else if (RC == &X86::VR128RegClass) {
    Opc = X86::MOVAPSmr;
  } else if (RC == &X86::VR64RegClass) {
    Opc = X86::MMX_MOVQ64mr;
  } else {
    assert(0 && "Unknown regclass");
    abort();
  }
  addFrameReference(BuildMI(MBB, MI, TII.get(Opc)), FrameIdx)
    .addReg(SrcReg, false, false, true);
}

void X86RegisterInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                           MachineBasicBlock::iterator MI,
                                           unsigned DestReg, int FrameIdx,
                                           const TargetRegisterClass *RC) const{
  unsigned Opc;
  if (RC == &X86::GR64RegClass) {
    Opc = X86::MOV64rm;
  } else if (RC == &X86::GR32RegClass) {
    Opc = X86::MOV32rm;
  } else if (RC == &X86::GR16RegClass) {
    Opc = X86::MOV16rm;
  } else if (RC == &X86::GR8RegClass) {
    Opc = X86::MOV8rm;
  } else if (RC == &X86::GR32_RegClass) {
    Opc = X86::MOV32_rm;
  } else if (RC == &X86::GR16_RegClass) {
    Opc = X86::MOV16_rm;
  } else if (RC == &X86::RFP64RegClass || RC == &X86::RSTRegClass) {
    Opc = X86::FpLD64m;
  } else if (RC == &X86::RFP32RegClass) {
    Opc = X86::FpLD32m;
  } else if (RC == &X86::FR32RegClass) {
    Opc = X86::MOVSSrm;
  } else if (RC == &X86::FR64RegClass) {
    Opc = X86::MOVSDrm;
  } else if (RC == &X86::VR128RegClass) {
    Opc = X86::MOVAPSrm;
  } else if (RC == &X86::VR64RegClass) {
    Opc = X86::MMX_MOVQ64rm;
  } else {
    assert(0 && "Unknown regclass");
    abort();
  }
  addFrameReference(BuildMI(MBB, MI, TII.get(Opc), DestReg), FrameIdx);
}
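
// Note (illustrative, not part of the original file): addFrameReference, from
// X86InstrBuilder.h, appends the operands of an x86 memory reference whose
// base is still an abstract frame index, so a GR32 spill built above looks
// roughly like:
//
//   addFrameReference(BuildMI(MBB, MI, TII.get(X86::MOV32mr)), FrameIdx)
//     .addReg(SrcReg, false, false, true);    // movl %reg, <fi#FrameIdx>
//
// The frame index is only rewritten into an ESP/EBP-relative address later,
// in eliminateFrameIndex below.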

void X86RegisterInfo::copyRegToReg(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator MI,
                                   unsigned DestReg, unsigned SrcReg,
                                   const TargetRegisterClass *RC) const {
  unsigned Opc;
  if (RC == &X86::GR64RegClass) {
    Opc = X86::MOV64rr;
  } else if (RC == &X86::GR32RegClass) {
    Opc = X86::MOV32rr;
  } else if (RC == &X86::GR16RegClass) {
    Opc = X86::MOV16rr;
  } else if (RC == &X86::GR8RegClass) {
    Opc = X86::MOV8rr;
  } else if (RC == &X86::GR32_RegClass) {
    Opc = X86::MOV32_rr;
  } else if (RC == &X86::GR16_RegClass) {
    Opc = X86::MOV16_rr;
  } else if (RC == &X86::RFP32RegClass) {
    Opc = X86::FpMOV3232;
  } else if (RC == &X86::RFP64RegClass || RC == &X86::RSTRegClass) {
    Opc = X86::FpMOV6464;
  } else if (RC == &X86::FR32RegClass) {
    Opc = X86::FsMOVAPSrr;
  } else if (RC == &X86::FR64RegClass) {
    Opc = X86::FsMOVAPDrr;
  } else if (RC == &X86::VR128RegClass) {
    Opc = X86::MOVAPSrr;
  } else if (RC == &X86::VR64RegClass) {
    Opc = X86::MMX_MOVQ64rr;
  } else {
    assert(0 && "Unknown regclass");
    abort();
  }
  BuildMI(MBB, MI, TII.get(Opc), DestReg).addReg(SrcReg);
}


void X86RegisterInfo::reMaterialize(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    unsigned DestReg,
                                    const MachineInstr *Orig) const {
  MachineInstr *MI = Orig->clone();
  MI->getOperand(0).setReg(DestReg);
  MBB.insert(I, MI);
}

static MachineInstr *FuseTwoAddrInst(unsigned Opcode, unsigned FrameIndex,
                                     MachineInstr *MI,
                                     const TargetInstrInfo &TII) {
  unsigned NumOps = TII.getNumOperands(MI->getOpcode())-2;
  // Create the base instruction with the memory operand as the first part.
  MachineInstrBuilder MIB = addFrameReference(BuildMI(TII.get(Opcode)),
                                              FrameIndex);

  // Loop over the rest of the ri operands, converting them over.
  for (unsigned i = 0; i != NumOps; ++i) {
    MachineOperand &MO = MI->getOperand(i+2);
    if (MO.isReg())
      MIB = MIB.addReg(MO.getReg(), false, MO.isImplicit());
    else if (MO.isImm())
      MIB = MIB.addImm(MO.getImm());
    else if (MO.isGlobalAddress())
      MIB = MIB.addGlobalAddress(MO.getGlobal(), MO.getOffset());
    else if (MO.isJumpTableIndex())
      MIB = MIB.addJumpTableIndex(MO.getJumpTableIndex());
    else if (MO.isExternalSymbol())
      MIB = MIB.addExternalSymbol(MO.getSymbolName());
    else
      assert(0 && "Unknown operand type!");
  }
  return MIB;
}

static MachineInstr *FuseInst(unsigned Opcode, unsigned OpNo,
                              unsigned FrameIndex, MachineInstr *MI,
                              const TargetInstrInfo &TII) {
  MachineInstrBuilder MIB = BuildMI(TII.get(Opcode));

  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (i == OpNo) {
      assert(MO.isReg() && "Expected to fold into reg operand!");
      MIB = addFrameReference(MIB, FrameIndex);
    } else if (MO.isReg())
      MIB = MIB.addReg(MO.getReg(), MO.isDef(), MO.isImplicit());
    else if (MO.isImm())
      MIB = MIB.addImm(MO.getImm());
    else if (MO.isGlobalAddress())
      MIB = MIB.addGlobalAddress(MO.getGlobal(), MO.getOffset());
    else if (MO.isJumpTableIndex())
      MIB = MIB.addJumpTableIndex(MO.getJumpTableIndex());
    else if (MO.isExternalSymbol())
      MIB = MIB.addExternalSymbol(MO.getSymbolName());
    else
      assert(0 && "Unknown operand for FuseInst!");
  }
  return MIB;
}

static MachineInstr *MakeM0Inst(const TargetInstrInfo &TII,
                                unsigned Opcode, unsigned FrameIndex,
                                MachineInstr *MI) {
  return addFrameReference(BuildMI(TII.get(Opcode)), FrameIndex).addImm(0);
}


//===----------------------------------------------------------------------===//
// Efficient Lookup Table Support
//===----------------------------------------------------------------------===//

namespace {
  /// TableEntry - Maps the 'from' opcode to a fused form of the 'to' opcode.
  ///
  struct TableEntry {
    unsigned from;      // Original opcode.
    unsigned to;        // New opcode.

    // less-than operators used by STL search.
    bool operator<(const TableEntry &TE) const { return from < TE.from; }
    friend bool operator<(const TableEntry &TE, unsigned V) {
      return TE.from < V;
    }
    friend bool operator<(unsigned V, const TableEntry &TE) {
      return V < TE.from;
    }
  };
}

/// TableIsSorted - Return true if the table is in 'from' opcode order.
///
static bool TableIsSorted(const TableEntry *Table, unsigned NumEntries) {
  for (unsigned i = 1; i != NumEntries; ++i)
    if (!(Table[i-1] < Table[i])) {
      cerr << "Entries out of order " << Table[i-1].from
           << " " << Table[i].from << "\n";
      return false;
    }
  return true;
}

/// TableLookup - Return the table entry matching the specified opcode.
/// Otherwise return NULL.
static const TableEntry *TableLookup(const TableEntry *Table, unsigned N,
                                     unsigned Opcode) {
  const TableEntry *I = std::lower_bound(Table, Table+N, Opcode);
  if (I != Table+N && I->from == Opcode)
    return I;
  return NULL;
}

#define ARRAY_SIZE(TABLE)  \
  (sizeof(TABLE)/sizeof(TABLE[0]))

#ifdef NDEBUG
#define ASSERT_SORTED(TABLE)
#else
#define ASSERT_SORTED(TABLE)                                              \
  { static bool TABLE##Checked = false;                                   \
    if (!TABLE##Checked) {                                                \
      assert(TableIsSorted(TABLE, ARRAY_SIZE(TABLE)) &&                   \
             "All lookup tables must be sorted for efficient access!");   \
      TABLE##Checked = true;                                              \
    }                                                                     \
  }
#endif
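
// Illustrative sketch (not part of the original file): the heterogeneous
// operator< overloads on TableEntry are what let std::lower_bound compare
// entries directly against a bare opcode. With made-up opcode numbers:
//
//   static const TableEntry Tbl[] = { { 1, 10 }, { 3, 30 }, { 7, 70 } };
//   ASSERT_SORTED(Tbl);                         // debug-only, checked once
//   const TableEntry *E = TableLookup(Tbl, ARRAY_SIZE(Tbl), 3);
//   // E->to == 30; TableLookup(Tbl, ARRAY_SIZE(Tbl), 4) returns NULL.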


MachineInstr* X86RegisterInfo::foldMemoryOperand(MachineInstr *MI,
                                                 unsigned i,
                                                 int FrameIndex) const {
  // Check switch flag
  if (NoFusing) return NULL;

  // Table (and size) to search
  const TableEntry *OpcodeTablePtr = NULL;
  unsigned OpcodeTableSize = 0;
  bool isTwoAddrFold = false;
  unsigned NumOps = TII.getNumOperands(MI->getOpcode());
  bool isTwoAddr = NumOps > 1 &&
    MI->getInstrDescriptor()->getOperandConstraint(1, TOI::TIED_TO) != -1;

  MachineInstr *NewMI = NULL;
  // Folding a memory location into the two-address part of a two-address
  // instruction is different from folding it in other places. It requires
  // replacing the *two* registers with the memory location.
  if (isTwoAddr && NumOps >= 2 && i < 2 &&
      MI->getOperand(0).isReg() &&
      MI->getOperand(1).isReg() &&
      MI->getOperand(0).getReg() == MI->getOperand(1).getReg()) {
    static const TableEntry OpcodeTable[] = {
      { X86::ADC32ri,     X86::ADC32mi },
      { X86::ADC32ri8,    X86::ADC32mi8 },
      { X86::ADC32rr,     X86::ADC32mr },
      { X86::ADC64ri32,   X86::ADC64mi32 },
      { X86::ADC64ri8,    X86::ADC64mi8 },
      { X86::ADC64rr,     X86::ADC64mr },
      { X86::ADD16ri,     X86::ADD16mi },
      { X86::ADD16ri8,    X86::ADD16mi8 },
      { X86::ADD16rr,     X86::ADD16mr },
      { X86::ADD32ri,     X86::ADD32mi },
      { X86::ADD32ri8,    X86::ADD32mi8 },
      { X86::ADD32rr,     X86::ADD32mr },
      { X86::ADD64ri32,   X86::ADD64mi32 },
      { X86::ADD64ri8,    X86::ADD64mi8 },
      { X86::ADD64rr,     X86::ADD64mr },
      { X86::ADD8ri,      X86::ADD8mi },
      { X86::ADD8rr,      X86::ADD8mr },
      { X86::AND16ri,     X86::AND16mi },
      { X86::AND16ri8,    X86::AND16mi8 },
      { X86::AND16rr,     X86::AND16mr },
      { X86::AND32ri,     X86::AND32mi },
      { X86::AND32ri8,    X86::AND32mi8 },
      { X86::AND32rr,     X86::AND32mr },
      { X86::AND64ri32,   X86::AND64mi32 },
      { X86::AND64ri8,    X86::AND64mi8 },
      { X86::AND64rr,     X86::AND64mr },
      { X86::AND8ri,      X86::AND8mi },
      { X86::AND8rr,      X86::AND8mr },
      { X86::DEC16r,      X86::DEC16m },
      { X86::DEC32r,      X86::DEC32m },
      { X86::DEC64_16r,   X86::DEC16m },
      { X86::DEC64_32r,   X86::DEC32m },
      { X86::DEC64r,      X86::DEC64m },
      { X86::DEC8r,       X86::DEC8m },
      { X86::INC16r,      X86::INC16m },
      { X86::INC32r,      X86::INC32m },
      { X86::INC64_16r,   X86::INC16m },
      { X86::INC64_32r,   X86::INC32m },
      { X86::INC64r,      X86::INC64m },
      { X86::INC8r,       X86::INC8m },
      { X86::NEG16r,      X86::NEG16m },
      { X86::NEG32r,      X86::NEG32m },
      { X86::NEG64r,      X86::NEG64m },
      { X86::NEG8r,       X86::NEG8m },
      { X86::NOT16r,      X86::NOT16m },
      { X86::NOT32r,      X86::NOT32m },
      { X86::NOT64r,      X86::NOT64m },
      { X86::NOT8r,       X86::NOT8m },
      { X86::OR16ri,      X86::OR16mi },
      { X86::OR16ri8,     X86::OR16mi8 },
      { X86::OR16rr,      X86::OR16mr },
      { X86::OR32ri,      X86::OR32mi },
      { X86::OR32ri8,     X86::OR32mi8 },
      { X86::OR32rr,      X86::OR32mr },
      { X86::OR64ri32,    X86::OR64mi32 },
      { X86::OR64ri8,     X86::OR64mi8 },
      { X86::OR64rr,      X86::OR64mr },
      { X86::OR8ri,       X86::OR8mi },
      { X86::OR8rr,       X86::OR8mr },
      { X86::ROL16r1,     X86::ROL16m1 },
      { X86::ROL16rCL,    X86::ROL16mCL },
      { X86::ROL16ri,     X86::ROL16mi },
      { X86::ROL32r1,     X86::ROL32m1 },
      { X86::ROL32rCL,    X86::ROL32mCL },
      { X86::ROL32ri,     X86::ROL32mi },
      { X86::ROL64r1,     X86::ROL64m1 },
      { X86::ROL64rCL,    X86::ROL64mCL },
      { X86::ROL64ri,     X86::ROL64mi },
      { X86::ROL8r1,      X86::ROL8m1 },
      { X86::ROL8rCL,     X86::ROL8mCL },
      { X86::ROL8ri,      X86::ROL8mi },
      { X86::ROR16r1,     X86::ROR16m1 },
      { X86::ROR16rCL,    X86::ROR16mCL },
      { X86::ROR16ri,     X86::ROR16mi },
      { X86::ROR32r1,     X86::ROR32m1 },
      { X86::ROR32rCL,    X86::ROR32mCL },
      { X86::ROR32ri,     X86::ROR32mi },
      { X86::ROR64r1,     X86::ROR64m1 },
      { X86::ROR64rCL,    X86::ROR64mCL },
      { X86::ROR64ri,     X86::ROR64mi },
      { X86::ROR8r1,      X86::ROR8m1 },
      { X86::ROR8rCL,     X86::ROR8mCL },
      { X86::ROR8ri,      X86::ROR8mi },
      { X86::SAR16r1,     X86::SAR16m1 },
      { X86::SAR16rCL,    X86::SAR16mCL },
      { X86::SAR16ri,     X86::SAR16mi },
      { X86::SAR32r1,     X86::SAR32m1 },
      { X86::SAR32rCL,    X86::SAR32mCL },
      { X86::SAR32ri,     X86::SAR32mi },
      { X86::SAR64r1,     X86::SAR64m1 },
      { X86::SAR64rCL,    X86::SAR64mCL },
      { X86::SAR64ri,     X86::SAR64mi },
      { X86::SAR8r1,      X86::SAR8m1 },
      { X86::SAR8rCL,     X86::SAR8mCL },
      { X86::SAR8ri,      X86::SAR8mi },
      { X86::SBB32ri,     X86::SBB32mi },
      { X86::SBB32ri8,    X86::SBB32mi8 },
      { X86::SBB32rr,     X86::SBB32mr },
      { X86::SBB64ri32,   X86::SBB64mi32 },
      { X86::SBB64ri8,    X86::SBB64mi8 },
      { X86::SBB64rr,     X86::SBB64mr },
      { X86::SHL16r1,     X86::SHL16m1 },
      { X86::SHL16rCL,    X86::SHL16mCL },
      { X86::SHL16ri,     X86::SHL16mi },
      { X86::SHL32r1,     X86::SHL32m1 },
      { X86::SHL32rCL,    X86::SHL32mCL },
      { X86::SHL32ri,     X86::SHL32mi },
      { X86::SHL64r1,     X86::SHL64m1 },
      { X86::SHL64rCL,    X86::SHL64mCL },
      { X86::SHL64ri,     X86::SHL64mi },
      { X86::SHL8r1,      X86::SHL8m1 },
      { X86::SHL8rCL,     X86::SHL8mCL },
      { X86::SHL8ri,      X86::SHL8mi },
      { X86::SHLD16rrCL,  X86::SHLD16mrCL },
      { X86::SHLD16rri8,  X86::SHLD16mri8 },
      { X86::SHLD32rrCL,  X86::SHLD32mrCL },
      { X86::SHLD32rri8,  X86::SHLD32mri8 },
      { X86::SHLD64rrCL,  X86::SHLD64mrCL },
      { X86::SHLD64rri8,  X86::SHLD64mri8 },
      { X86::SHR16r1,     X86::SHR16m1 },
      { X86::SHR16rCL,    X86::SHR16mCL },
      { X86::SHR16ri,     X86::SHR16mi },
      { X86::SHR32r1,     X86::SHR32m1 },
      { X86::SHR32rCL,    X86::SHR32mCL },
      { X86::SHR32ri,     X86::SHR32mi },
      { X86::SHR64r1,     X86::SHR64m1 },
      { X86::SHR64rCL,    X86::SHR64mCL },
      { X86::SHR64ri,     X86::SHR64mi },
      { X86::SHR8r1,      X86::SHR8m1 },
      { X86::SHR8rCL,     X86::SHR8mCL },
      { X86::SHR8ri,      X86::SHR8mi },
      { X86::SHRD16rrCL,  X86::SHRD16mrCL },
      { X86::SHRD16rri8,  X86::SHRD16mri8 },
      { X86::SHRD32rrCL,  X86::SHRD32mrCL },
      { X86::SHRD32rri8,  X86::SHRD32mri8 },
      { X86::SHRD64rrCL,  X86::SHRD64mrCL },
      { X86::SHRD64rri8,  X86::SHRD64mri8 },
      { X86::SUB16ri,     X86::SUB16mi },
      { X86::SUB16ri8,    X86::SUB16mi8 },
      { X86::SUB16rr,     X86::SUB16mr },
      { X86::SUB32ri,     X86::SUB32mi },
      { X86::SUB32ri8,    X86::SUB32mi8 },
      { X86::SUB32rr,     X86::SUB32mr },
      { X86::SUB64ri32,   X86::SUB64mi32 },
      { X86::SUB64ri8,    X86::SUB64mi8 },
      { X86::SUB64rr,     X86::SUB64mr },
      { X86::SUB8ri,      X86::SUB8mi },
      { X86::SUB8rr,      X86::SUB8mr },
      { X86::XOR16ri,     X86::XOR16mi },
      { X86::XOR16ri8,    X86::XOR16mi8 },
      { X86::XOR16rr,     X86::XOR16mr },
      { X86::XOR32ri,     X86::XOR32mi },
      { X86::XOR32ri8,    X86::XOR32mi8 },
      { X86::XOR32rr,     X86::XOR32mr },
      { X86::XOR64ri32,   X86::XOR64mi32 },
      { X86::XOR64ri8,    X86::XOR64mi8 },
      { X86::XOR64rr,     X86::XOR64mr },
      { X86::XOR8ri,      X86::XOR8mi },
      { X86::XOR8rr,      X86::XOR8mr }
    };
    ASSERT_SORTED(OpcodeTable);
    OpcodeTablePtr = OpcodeTable;
    OpcodeTableSize = ARRAY_SIZE(OpcodeTable);
    isTwoAddrFold = true;
  } else if (i == 0) { // If operand 0
    if (MI->getOpcode() == X86::MOV16r0)
      NewMI = MakeM0Inst(TII, X86::MOV16mi, FrameIndex, MI);
    else if (MI->getOpcode() == X86::MOV32r0)
      NewMI = MakeM0Inst(TII, X86::MOV32mi, FrameIndex, MI);
    else if (MI->getOpcode() == X86::MOV64r0)
      NewMI = MakeM0Inst(TII, X86::MOV64mi32, FrameIndex, MI);
    else if (MI->getOpcode() == X86::MOV8r0)
      NewMI = MakeM0Inst(TII, X86::MOV8mi, FrameIndex, MI);
    if (NewMI) {
      NewMI->copyKillDeadInfo(MI);
      return NewMI;
    }

    static const TableEntry OpcodeTable[] = {
      { X86::CMP16ri,     X86::CMP16mi },
      { X86::CMP16ri8,    X86::CMP16mi8 },
      { X86::CMP32ri,     X86::CMP32mi },
      { X86::CMP32ri8,    X86::CMP32mi8 },
      { X86::CMP8ri,      X86::CMP8mi },
      { X86::DIV16r,      X86::DIV16m },
      { X86::DIV32r,      X86::DIV32m },
      { X86::DIV64r,      X86::DIV64m },
      { X86::DIV8r,       X86::DIV8m },
      { X86::FsMOVAPDrr,  X86::MOVSDmr },
      { X86::FsMOVAPSrr,  X86::MOVSSmr },
      { X86::IDIV16r,     X86::IDIV16m },
      { X86::IDIV32r,     X86::IDIV32m },
      { X86::IDIV64r,     X86::IDIV64m },
      { X86::IDIV8r,      X86::IDIV8m },
      { X86::IMUL16r,     X86::IMUL16m },
      { X86::IMUL32r,     X86::IMUL32m },
      { X86::IMUL64r,     X86::IMUL64m },
      { X86::IMUL8r,      X86::IMUL8m },
      { X86::MOV16ri,     X86::MOV16mi },
      { X86::MOV16rr,     X86::MOV16mr },
      { X86::MOV32ri,     X86::MOV32mi },
      { X86::MOV32rr,     X86::MOV32mr },
      { X86::MOV64ri32,   X86::MOV64mi32 },
      { X86::MOV64rr,     X86::MOV64mr },
      { X86::MOV8ri,      X86::MOV8mi },
      { X86::MOV8rr,      X86::MOV8mr },
      { X86::MOVAPDrr,    X86::MOVAPDmr },
      { X86::MOVAPSrr,    X86::MOVAPSmr },
      { X86::MOVPDI2DIrr, X86::MOVPDI2DImr },
      { X86::MOVPQIto64rr,X86::MOVPQIto64mr },
      { X86::MOVPS2SSrr,  X86::MOVPS2SSmr },
      { X86::MOVSDrr,     X86::MOVSDmr },
      { X86::MOVSDto64rr, X86::MOVSDto64mr },
      { X86::MOVSS2DIrr,  X86::MOVSS2DImr },
      { X86::MOVSSrr,     X86::MOVSSmr },
      { X86::MOVUPDrr,    X86::MOVUPDmr },
      { X86::MOVUPSrr,    X86::MOVUPSmr },
      { X86::MUL16r,      X86::MUL16m },
      { X86::MUL32r,      X86::MUL32m },
      { X86::MUL64r,      X86::MUL64m },
      { X86::MUL8r,       X86::MUL8m },
      { X86::SETAEr,      X86::SETAEm },
      { X86::SETAr,       X86::SETAm },
      { X86::SETBEr,      X86::SETBEm },
      { X86::SETBr,       X86::SETBm },
      { X86::SETEr,       X86::SETEm },
      { X86::SETGEr,      X86::SETGEm },
      { X86::SETGr,       X86::SETGm },
      { X86::SETLEr,      X86::SETLEm },
      { X86::SETLr,       X86::SETLm },
      { X86::SETNEr,      X86::SETNEm },
      { X86::SETNPr,      X86::SETNPm },
      { X86::SETNSr,      X86::SETNSm },
      { X86::SETPr,       X86::SETPm },
      { X86::SETSr,       X86::SETSm },
      { X86::TEST16ri,    X86::TEST16mi },
      { X86::TEST32ri,    X86::TEST32mi },
      { X86::TEST64ri32,  X86::TEST64mi32 },
      { X86::TEST8ri,     X86::TEST8mi },
      { X86::XCHG16rr,    X86::XCHG16mr },
      { X86::XCHG32rr,    X86::XCHG32mr },
      { X86::XCHG64rr,    X86::XCHG64mr },
      { X86::XCHG8rr,     X86::XCHG8mr }
    };
    ASSERT_SORTED(OpcodeTable);
    OpcodeTablePtr = OpcodeTable;
    OpcodeTableSize = ARRAY_SIZE(OpcodeTable);
  } else if (i == 1) {
    static const TableEntry OpcodeTable[] = {
      { X86::CMP16rr,        X86::CMP16rm },
      { X86::CMP32rr,        X86::CMP32rm },
      { X86::CMP64ri32,      X86::CMP64mi32 },
      { X86::CMP64ri8,       X86::CMP64mi8 },
      { X86::CMP64rr,        X86::CMP64rm },
      { X86::CMP8rr,         X86::CMP8rm },
      { X86::CMPPDrri,       X86::CMPPDrmi },
      { X86::CMPPSrri,       X86::CMPPSrmi },
      { X86::CMPSDrr,        X86::CMPSDrm },
      { X86::CMPSSrr,        X86::CMPSSrm },
      { X86::CVTSD2SSrr,     X86::CVTSD2SSrm },
      { X86::CVTSI2SD64rr,   X86::CVTSI2SD64rm },
      { X86::CVTSI2SDrr,     X86::CVTSI2SDrm },
      { X86::CVTSI2SS64rr,   X86::CVTSI2SS64rm },
      { X86::CVTSI2SSrr,     X86::CVTSI2SSrm },
      { X86::CVTSS2SDrr,     X86::CVTSS2SDrm },
      { X86::CVTTSD2SI64rr,  X86::CVTTSD2SI64rm },
      { X86::CVTTSD2SIrr,    X86::CVTTSD2SIrm },
      { X86::CVTTSS2SI64rr,  X86::CVTTSS2SI64rm },
      { X86::CVTTSS2SIrr,    X86::CVTTSS2SIrm },
      { X86::FsMOVAPDrr,     X86::MOVSDrm },
      { X86::FsMOVAPSrr,     X86::MOVSSrm },
      { X86::IMUL16rri,      X86::IMUL16rmi },
      { X86::IMUL16rri8,     X86::IMUL16rmi8 },
      { X86::IMUL32rri,      X86::IMUL32rmi },
      { X86::IMUL32rri8,     X86::IMUL32rmi8 },
      { X86::IMUL64rr,       X86::IMUL64rm },
      { X86::IMUL64rri32,    X86::IMUL64rmi32 },
      { X86::IMUL64rri8,     X86::IMUL64rmi8 },
      { X86::Int_CMPSDrr,    X86::Int_CMPSDrm },
      { X86::Int_CMPSSrr,    X86::Int_CMPSSrm },
      { X86::Int_COMISDrr,   X86::Int_COMISDrm },
      { X86::Int_COMISSrr,   X86::Int_COMISSrm },
      { X86::Int_CVTDQ2PDrr, X86::Int_CVTDQ2PDrm },
      { X86::Int_CVTDQ2PSrr,   X86::Int_CVTDQ2PSrm },
      { X86::Int_CVTPD2DQrr,   X86::Int_CVTPD2DQrm },
      { X86::Int_CVTPD2PSrr,   X86::Int_CVTPD2PSrm },
      { X86::Int_CVTPS2DQrr,   X86::Int_CVTPS2DQrm },
      { X86::Int_CVTPS2PDrr,   X86::Int_CVTPS2PDrm },
      { X86::Int_CVTSD2SI64rr, X86::Int_CVTSD2SI64rm },
      { X86::Int_CVTSD2SIrr,   X86::Int_CVTSD2SIrm },
      { X86::Int_CVTSD2SSrr,   X86::Int_CVTSD2SSrm },
      { X86::Int_CVTSI2SD64rr, X86::Int_CVTSI2SD64rm },
      { X86::Int_CVTSI2SDrr,   X86::Int_CVTSI2SDrm },
      { X86::Int_CVTSI2SS64rr, X86::Int_CVTSI2SS64rm },
      { X86::Int_CVTSI2SSrr,   X86::Int_CVTSI2SSrm },
      { X86::Int_CVTSS2SDrr,   X86::Int_CVTSS2SDrm },
      { X86::Int_CVTSS2SI64rr, X86::Int_CVTSS2SI64rm },
      { X86::Int_CVTSS2SIrr,   X86::Int_CVTSS2SIrm },
      { X86::Int_CVTTPD2DQrr,  X86::Int_CVTTPD2DQrm },
      { X86::Int_CVTTPS2DQrr,  X86::Int_CVTTPS2DQrm },
      { X86::Int_CVTTSD2SI64rr,X86::Int_CVTTSD2SI64rm },
      { X86::Int_CVTTSD2SIrr,  X86::Int_CVTTSD2SIrm },
      { X86::Int_CVTTSS2SI64rr,X86::Int_CVTTSS2SI64rm },
      { X86::Int_CVTTSS2SIrr,  X86::Int_CVTTSS2SIrm },
      { X86::Int_UCOMISDrr,    X86::Int_UCOMISDrm },
      { X86::Int_UCOMISSrr,    X86::Int_UCOMISSrm },
      { X86::MOV16rr,          X86::MOV16rm },
      { X86::MOV32rr,          X86::MOV32rm },
      { X86::MOV64rr,          X86::MOV64rm },
      { X86::MOV64toPQIrr,     X86::MOV64toPQIrm },
      { X86::MOV64toSDrr,      X86::MOV64toSDrm },
      { X86::MOV8rr,           X86::MOV8rm },
      { X86::MOVAPDrr,         X86::MOVAPDrm },
      { X86::MOVAPSrr,         X86::MOVAPSrm },
      { X86::MOVDDUPrr,        X86::MOVDDUPrm },
      { X86::MOVDI2PDIrr,      X86::MOVDI2PDIrm },
      { X86::MOVDI2SSrr,       X86::MOVDI2SSrm },
      { X86::MOVSD2PDrr,       X86::MOVSD2PDrm },
      { X86::MOVSDrr,          X86::MOVSDrm },
      { X86::MOVSHDUPrr,       X86::MOVSHDUPrm },
      { X86::MOVSLDUPrr,       X86::MOVSLDUPrm },
      { X86::MOVSS2PSrr,       X86::MOVSS2PSrm },
      { X86::MOVSSrr,          X86::MOVSSrm },
      { X86::MOVSX16rr8,       X86::MOVSX16rm8 },
      { X86::MOVSX32rr16,      X86::MOVSX32rm16 },
      { X86::MOVSX32rr8,       X86::MOVSX32rm8 },
      { X86::MOVSX64rr16,      X86::MOVSX64rm16 },
      { X86::MOVSX64rr32,      X86::MOVSX64rm32 },
      { X86::MOVSX64rr8,       X86::MOVSX64rm8 },
      { X86::MOVUPDrr,         X86::MOVUPDrm },
      { X86::MOVUPSrr,         X86::MOVUPSrm },
      { X86::MOVZX16rr8,       X86::MOVZX16rm8 },
      { X86::MOVZX32rr16,      X86::MOVZX32rm16 },
      { X86::MOVZX32rr8,       X86::MOVZX32rm8 },
      { X86::MOVZX64rr16,      X86::MOVZX64rm16 },
      { X86::MOVZX64rr8,       X86::MOVZX64rm8 },
      { X86::PSHUFDri,         X86::PSHUFDmi },
      { X86::PSHUFHWri,        X86::PSHUFHWmi },
      { X86::PSHUFLWri,        X86::PSHUFLWmi },
      { X86::PsMOVZX64rr32,    X86::PsMOVZX64rm32 },
      { X86::TEST16rr,         X86::TEST16rm },
      { X86::TEST32rr,         X86::TEST32rm },
      { X86::TEST64rr,         X86::TEST64rm },
      { X86::TEST8rr,          X86::TEST8rm },
      // FIXME: TEST*rr EAX,EAX ---> CMP [mem], 0
      { X86::UCOMISDrr,        X86::UCOMISDrm },
      { X86::UCOMISSrr,        X86::UCOMISSrm },
      { X86::XCHG16rr,         X86::XCHG16rm },
      { X86::XCHG32rr,         X86::XCHG32rm },
      { X86::XCHG64rr,         X86::XCHG64rm },
      { X86::XCHG8rr,          X86::XCHG8rm }
    };
    ASSERT_SORTED(OpcodeTable);
    OpcodeTablePtr = OpcodeTable;
    OpcodeTableSize = ARRAY_SIZE(OpcodeTable);
  } else if (i == 2) {
    static const TableEntry OpcodeTable[] = {
      { X86::ADC32rr,      X86::ADC32rm },
      { X86::ADC64rr,      X86::ADC64rm },
      { X86::ADD16rr,      X86::ADD16rm },
      { X86::ADD32rr,      X86::ADD32rm },
      { X86::ADD64rr,      X86::ADD64rm },
      { X86::ADD8rr,       X86::ADD8rm },
      { X86::ADDPDrr,      X86::ADDPDrm },
      { X86::ADDPSrr,      X86::ADDPSrm },
      { X86::ADDSDrr,      X86::ADDSDrm },
      { X86::ADDSSrr,      X86::ADDSSrm },
      { X86::ADDSUBPDrr,   X86::ADDSUBPDrm },
      { X86::ADDSUBPSrr,   X86::ADDSUBPSrm },
      { X86::AND16rr,      X86::AND16rm },
      { X86::AND32rr,      X86::AND32rm },
      { X86::AND64rr,      X86::AND64rm },
      { X86::AND8rr,       X86::AND8rm },
      { X86::ANDNPDrr,     X86::ANDNPDrm },
      { X86::ANDNPSrr,     X86::ANDNPSrm },
      { X86::ANDPDrr,      X86::ANDPDrm },
      { X86::ANDPSrr,      X86::ANDPSrm },
      { X86::CMOVA16rr,    X86::CMOVA16rm },
      { X86::CMOVA32rr,    X86::CMOVA32rm },
      { X86::CMOVA64rr,    X86::CMOVA64rm },
      { X86::CMOVAE16rr,   X86::CMOVAE16rm },
      { X86::CMOVAE32rr,   X86::CMOVAE32rm },
      { X86::CMOVAE64rr,   X86::CMOVAE64rm },
      { X86::CMOVB16rr,    X86::CMOVB16rm },
      { X86::CMOVB32rr,    X86::CMOVB32rm },
      { X86::CMOVB64rr,    X86::CMOVB64rm },
      { X86::CMOVBE16rr,   X86::CMOVBE16rm },
      { X86::CMOVBE32rr,   X86::CMOVBE32rm },
      { X86::CMOVBE64rr,   X86::CMOVBE64rm },
      { X86::CMOVE16rr,    X86::CMOVE16rm },
      { X86::CMOVE32rr,    X86::CMOVE32rm },
      { X86::CMOVE64rr,    X86::CMOVE64rm },
      { X86::CMOVG16rr,    X86::CMOVG16rm },
      { X86::CMOVG32rr,    X86::CMOVG32rm },
      { X86::CMOVG64rr,    X86::CMOVG64rm },
      { X86::CMOVGE16rr,   X86::CMOVGE16rm },
      { X86::CMOVGE32rr,   X86::CMOVGE32rm },
      { X86::CMOVGE64rr,   X86::CMOVGE64rm },
      { X86::CMOVL16rr,    X86::CMOVL16rm },
      { X86::CMOVL32rr,    X86::CMOVL32rm },
      { X86::CMOVL64rr,    X86::CMOVL64rm },
      { X86::CMOVLE16rr,   X86::CMOVLE16rm },
      { X86::CMOVLE32rr,   X86::CMOVLE32rm },
      { X86::CMOVLE64rr,   X86::CMOVLE64rm },
      { X86::CMOVNE16rr,   X86::CMOVNE16rm },
      { X86::CMOVNE32rr,   X86::CMOVNE32rm },
      { X86::CMOVNE64rr,   X86::CMOVNE64rm },
      { X86::CMOVNP16rr,   X86::CMOVNP16rm },
      { X86::CMOVNP32rr,   X86::CMOVNP32rm },
      { X86::CMOVNP64rr,   X86::CMOVNP64rm },
      { X86::CMOVNS16rr,   X86::CMOVNS16rm },
      { X86::CMOVNS32rr,   X86::CMOVNS32rm },
      { X86::CMOVNS64rr,   X86::CMOVNS64rm },
      { X86::CMOVP16rr,    X86::CMOVP16rm },
      { X86::CMOVP32rr,    X86::CMOVP32rm },
      { X86::CMOVP64rr,    X86::CMOVP64rm },
      { X86::CMOVS16rr,    X86::CMOVS16rm },
      { X86::CMOVS32rr,    X86::CMOVS32rm },
      { X86::CMOVS64rr,    X86::CMOVS64rm },
      { X86::DIVPDrr,      X86::DIVPDrm },
      { X86::DIVPSrr,      X86::DIVPSrm },
      { X86::DIVSDrr,      X86::DIVSDrm },
      { X86::DIVSSrr,      X86::DIVSSrm },
      { X86::HADDPDrr,     X86::HADDPDrm },
      { X86::HADDPSrr,     X86::HADDPSrm },
      { X86::HSUBPDrr,     X86::HSUBPDrm },
      { X86::HSUBPSrr,     X86::HSUBPSrm },
      { X86::IMUL16rr,     X86::IMUL16rm },
      { X86::IMUL32rr,     X86::IMUL32rm },
      { X86::MAXPDrr,      X86::MAXPDrm },
      { X86::MAXPSrr,      X86::MAXPSrm },
      { X86::MINPDrr,      X86::MINPDrm },
      { X86::MINPSrr,      X86::MINPSrm },
      { X86::MULPDrr,      X86::MULPDrm },
      { X86::MULPSrr,      X86::MULPSrm },
      { X86::MULSDrr,      X86::MULSDrm },
      { X86::MULSSrr,      X86::MULSSrm },
      { X86::OR16rr,       X86::OR16rm },
      { X86::OR32rr,       X86::OR32rm },
      { X86::OR64rr,       X86::OR64rm },
      { X86::OR8rr,        X86::OR8rm },
      { X86::ORPDrr,       X86::ORPDrm },
      { X86::ORPSrr,       X86::ORPSrm },
      { X86::PACKSSDWrr,   X86::PACKSSDWrm },
      { X86::PACKSSWBrr,   X86::PACKSSWBrm },
      { X86::PACKUSWBrr,   X86::PACKUSWBrm },
      { X86::PADDBrr,      X86::PADDBrm },
      { X86::PADDDrr,      X86::PADDDrm },
      { X86::PADDQrr,      X86::PADDQrm },
      { X86::PADDSBrr,     X86::PADDSBrm },
      { X86::PADDSWrr,     X86::PADDSWrm },
      { X86::PADDWrr,      X86::PADDWrm },
      { X86::PANDNrr,      X86::PANDNrm },
      { X86::PANDrr,       X86::PANDrm },
      { X86::PAVGBrr,      X86::PAVGBrm },
      { X86::PAVGWrr,      X86::PAVGWrm },
      { X86::PCMPEQBrr,    X86::PCMPEQBrm },
      { X86::PCMPEQDrr,    X86::PCMPEQDrm },
      { X86::PCMPEQWrr,    X86::PCMPEQWrm },
      { X86::PCMPGTBrr,    X86::PCMPGTBrm },
      { X86::PCMPGTDrr,    X86::PCMPGTDrm },
      { X86::PCMPGTWrr,    X86::PCMPGTWrm },
      { X86::PINSRWrri,    X86::PINSRWrmi },
      { X86::PMADDWDrr,    X86::PMADDWDrm },
      { X86::PMAXSWrr,     X86::PMAXSWrm },
      { X86::PMAXUBrr,     X86::PMAXUBrm },
      { X86::PMINSWrr,     X86::PMINSWrm },
      { X86::PMINUBrr,     X86::PMINUBrm },
      { X86::PMULHUWrr,    X86::PMULHUWrm },
      { X86::PMULHWrr,     X86::PMULHWrm },
      { X86::PMULLWrr,     X86::PMULLWrm },
      { X86::PMULUDQrr,    X86::PMULUDQrm },
      { X86::PORrr,        X86::PORrm },
      { X86::PSADBWrr,     X86::PSADBWrm },
      { X86::PSLLDrr,      X86::PSLLDrm },
      { X86::PSLLQrr,      X86::PSLLQrm },
      { X86::PSLLWrr,      X86::PSLLWrm },
      { X86::PSRADrr,      X86::PSRADrm },
      { X86::PSRAWrr,      X86::PSRAWrm },
      { X86::PSRLDrr,      X86::PSRLDrm },
      { X86::PSRLQrr,      X86::PSRLQrm },
      { X86::PSRLWrr,      X86::PSRLWrm },
      { X86::PSUBBrr,      X86::PSUBBrm },
      { X86::PSUBDrr,      X86::PSUBDrm },
      { X86::PSUBSBrr,     X86::PSUBSBrm },
      { X86::PSUBSWrr,     X86::PSUBSWrm },
      { X86::PSUBWrr,      X86::PSUBWrm },
      { X86::PUNPCKHBWrr,  X86::PUNPCKHBWrm },
      { X86::PUNPCKHDQrr,  X86::PUNPCKHDQrm },
      { X86::PUNPCKHQDQrr, X86::PUNPCKHQDQrm },
      { X86::PUNPCKHWDrr,  X86::PUNPCKHWDrm },
      { X86::PUNPCKLBWrr,  X86::PUNPCKLBWrm },
      { X86::PUNPCKLDQrr,  X86::PUNPCKLDQrm },
      { X86::PUNPCKLQDQrr, X86::PUNPCKLQDQrm },
      { X86::PUNPCKLWDrr,  X86::PUNPCKLWDrm },
      { X86::PXORrr,       X86::PXORrm },
      { X86::RCPPSr,       X86::RCPPSm },
      { X86::RSQRTPSr,     X86::RSQRTPSm },
      { X86::SBB32rr,      X86::SBB32rm },
      { X86::SBB64rr,      X86::SBB64rm },
      { X86::SHUFPDrri,    X86::SHUFPDrmi },
      { X86::SHUFPSrri,    X86::SHUFPSrmi },
      { X86::SQRTPDr,      X86::SQRTPDm },
      { X86::SQRTPSr,      X86::SQRTPSm },
      { X86::SQRTSDr,      X86::SQRTSDm },
      { X86::SQRTSSr,      X86::SQRTSSm },
      { X86::SUB16rr,      X86::SUB16rm },
      { X86::SUB32rr,      X86::SUB32rm },
      { X86::SUB64rr,      X86::SUB64rm },
      { X86::SUB8rr,       X86::SUB8rm },
      { X86::SUBPDrr,      X86::SUBPDrm },
      { X86::SUBPSrr,      X86::SUBPSrm },
      { X86::SUBSDrr,      X86::SUBSDrm },
      { X86::SUBSSrr,      X86::SUBSSrm },
      // FIXME: TEST*rr -> swapped operand of TEST*mr.
      { X86::UNPCKHPDrr,   X86::UNPCKHPDrm },
      { X86::UNPCKHPSrr,   X86::UNPCKHPSrm },
      { X86::UNPCKLPDrr,   X86::UNPCKLPDrm },
      { X86::UNPCKLPSrr,   X86::UNPCKLPSrm },
      { X86::XOR16rr,      X86::XOR16rm },
      { X86::XOR32rr,      X86::XOR32rm },
      { X86::XOR64rr,      X86::XOR64rm },
      { X86::XOR8rr,       X86::XOR8rm },
      { X86::XORPDrr,      X86::XORPDrm },
      { X86::XORPSrr,      X86::XORPSrm }
    };
    ASSERT_SORTED(OpcodeTable);
    OpcodeTablePtr = OpcodeTable;
    OpcodeTableSize = ARRAY_SIZE(OpcodeTable);
  }

  // If table selected...
  if (OpcodeTablePtr) {
    // Find the Opcode to fuse
    unsigned fromOpcode = MI->getOpcode();
    // Lookup fromOpcode in table
    if (const TableEntry *Entry = TableLookup(OpcodeTablePtr, OpcodeTableSize,
                                              fromOpcode)) {
      if (isTwoAddrFold)
        NewMI = FuseTwoAddrInst(Entry->to, FrameIndex, MI, TII);
      else
        NewMI = FuseInst(Entry->to, i, FrameIndex, MI, TII);
      NewMI->copyKillDeadInfo(MI);
      return NewMI;
    }
  }

  // No fusion
  if (PrintFailedFusing)
    cerr << "We failed to fuse ("
         << ((i == 1) ? "r" : "s") << "): " << *MI;
  return NULL;
}
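
// Illustrative example (not part of the original file) of what the tables
// above produce. Folding the tied def/use pair of a two-address instruction
// replaces both registers with one memory operand:
//
//   %EAX = ADD32rr %EAX, %EBX     // %EAX spilled to frame index FI
//     ==>  ADD32mr <fi#FI>, %EBX  // addl %ebx, <slot>
//
// while folding a plain source operand (the i == 1 table) keeps the def in a
// register:
//
//   %EAX = MOV32rr %EBX           // %EBX reloaded from frame index FI
//     ==>  %EAX = MOV32rm <fi#FI> // movl <slot>, %eax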
"r" : "s") << "): " << *MI; 882 return NULL; 883} 884 885 886const unsigned *X86RegisterInfo::getCalleeSavedRegs() const { 887 static const unsigned CalleeSavedRegs32Bit[] = { 888 X86::ESI, X86::EDI, X86::EBX, X86::EBP, 0 889 }; 890 static const unsigned CalleeSavedRegs64Bit[] = { 891 X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0 892 }; 893 894 return Is64Bit ? CalleeSavedRegs64Bit : CalleeSavedRegs32Bit; 895} 896 897const TargetRegisterClass* const* 898X86RegisterInfo::getCalleeSavedRegClasses() const { 899 static const TargetRegisterClass * const CalleeSavedRegClasses32Bit[] = { 900 &X86::GR32RegClass, &X86::GR32RegClass, 901 &X86::GR32RegClass, &X86::GR32RegClass, 0 902 }; 903 static const TargetRegisterClass * const CalleeSavedRegClasses64Bit[] = { 904 &X86::GR64RegClass, &X86::GR64RegClass, 905 &X86::GR64RegClass, &X86::GR64RegClass, 906 &X86::GR64RegClass, &X86::GR64RegClass, 0 907 }; 908 909 return Is64Bit ? CalleeSavedRegClasses64Bit : CalleeSavedRegClasses32Bit; 910} 911 912BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const { 913 BitVector Reserved(getNumRegs()); 914 Reserved.set(X86::RSP); 915 Reserved.set(X86::ESP); 916 Reserved.set(X86::SP); 917 Reserved.set(X86::SPL); 918 if (hasFP(MF)) { 919 Reserved.set(X86::RBP); 920 Reserved.set(X86::EBP); 921 Reserved.set(X86::BP); 922 Reserved.set(X86::BPL); 923 } 924 return Reserved; 925} 926 927//===----------------------------------------------------------------------===// 928// Stack Frame Processing methods 929//===----------------------------------------------------------------------===// 930 931// hasFP - Return true if the specified function should have a dedicated frame 932// pointer register. This is true if the function has variable sized allocas or 933// if frame pointer elimination is disabled. 934// 935bool X86RegisterInfo::hasFP(const MachineFunction &MF) const { 936 return (NoFramePointerElim || 937 MF.getFrameInfo()->hasVarSizedObjects() || 938 MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer()); 939} 940 941void X86RegisterInfo:: 942eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB, 943 MachineBasicBlock::iterator I) const { 944 if (hasFP(MF)) { 945 // If we have a frame pointer, turn the adjcallstackup instruction into a 946 // 'sub ESP, <amt>' and the adjcallstackdown instruction into 'add ESP, 947 // <amt>' 948 MachineInstr *Old = I; 949 uint64_t Amount = Old->getOperand(0).getImm(); 950 if (Amount != 0) { 951 // We need to keep the stack aligned properly. To do this, we round the 952 // amount of space needed for the outgoing arguments up to the next 953 // alignment boundary. 954 unsigned Align = MF.getTarget().getFrameInfo()->getStackAlignment(); 955 Amount = (Amount+Align-1)/Align*Align; 956 957 MachineInstr *New = 0; 958 if (Old->getOpcode() == X86::ADJCALLSTACKDOWN) { 959 New=BuildMI(TII.get(Is64Bit ? X86::SUB64ri32 : X86::SUB32ri), StackPtr) 960 .addReg(StackPtr).addImm(Amount); 961 } else { 962 assert(Old->getOpcode() == X86::ADJCALLSTACKUP); 963 // factor out the amount the callee already popped. 964 uint64_t CalleeAmt = Old->getOperand(1).getImm(); 965 Amount -= CalleeAmt; 966 if (Amount) { 967 unsigned Opc = (Amount < 128) ? 968 (Is64Bit ? X86::ADD64ri8 : X86::ADD32ri8) : 969 (Is64Bit ? X86::ADD64ri32 : X86::ADD32ri); 970 New = BuildMI(TII.get(Opc), StackPtr) 971 .addReg(StackPtr).addImm(Amount); 972 } 973 } 974 975 // Replace the pseudo instruction with a new instruction... 

void X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                          int SPAdj, RegScavenger *RS) const{
  assert(SPAdj == 0 && "Unexpected");

  unsigned i = 0;
  MachineInstr &MI = *II;
  MachineFunction &MF = *MI.getParent()->getParent();
  while (!MI.getOperand(i).isFrameIndex()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  int FrameIndex = MI.getOperand(i).getFrameIndex();
  // This must be part of a four operand memory reference. Replace the
  // FrameIndex with the base register, EBP or ESP.
  MI.getOperand(i).ChangeToRegister(hasFP(MF) ? FramePtr : StackPtr, false);

  // Now add the frame object offset to the offset from EBP.
  int64_t Offset = MF.getFrameInfo()->getObjectOffset(FrameIndex) +
                   MI.getOperand(i+3).getImm()+SlotSize;

  if (!hasFP(MF))
    Offset += MF.getFrameInfo()->getStackSize();
  else
    Offset += SlotSize;  // Skip the saved EBP

  MI.getOperand(i+3).ChangeToImmediate(Offset);
}

void
X86RegisterInfo::processFunctionBeforeFrameFinalized(MachineFunction &MF) const{
  if (hasFP(MF)) {
    // Create a frame entry for the EBP register that must be saved.
    int FrameIdx = MF.getFrameInfo()->CreateFixedObject(SlotSize,
                                                        (int)SlotSize * -2);
    assert(FrameIdx == MF.getFrameInfo()->getObjectIndexBegin() &&
           "Slot for EBP register must be last in order to be found!");
  }
}

/// emitSPUpdate - Emit a series of instructions to increment / decrement the
/// stack pointer by a constant value.
static
void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
                  unsigned StackPtr, int64_t NumBytes, bool Is64Bit,
                  const TargetInstrInfo &TII) {
  bool isSub = NumBytes < 0;
  uint64_t Offset = isSub ? -NumBytes : NumBytes;
  unsigned Opc = isSub
    ? ((Offset < 128) ?
       (Is64Bit ? X86::SUB64ri8 : X86::SUB32ri8) :
       (Is64Bit ? X86::SUB64ri32 : X86::SUB32ri))
    : ((Offset < 128) ?
       (Is64Bit ? X86::ADD64ri8 : X86::ADD32ri8) :
       (Is64Bit ? X86::ADD64ri32 : X86::ADD32ri));
  uint64_t Chunk = (1LL << 31) - 1;

  while (Offset) {
    uint64_t ThisVal = (Offset > Chunk) ? Chunk : Offset;
    BuildMI(MBB, MBBI, TII.get(Opc), StackPtr).addReg(StackPtr).addImm(ThisVal);
    Offset -= ThisVal;
  }
}
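
// Illustrative example (not part of the original file): the Chunk loop above
// only matters for adjustments that do not fit a signed 32-bit immediate.
// On x86-64, emitSPUpdate(..., -3000000000, true, TII) emits two updates:
//
//   subq $2147483647, %rsp
//   subq $852516353,  %rsp    // 2147483647 + 852516353 == 3000000000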

void X86RegisterInfo::emitPrologue(MachineFunction &MF) const {
  MachineBasicBlock &MBB = MF.front();   // Prolog goes in entry BB
  MachineBasicBlock::iterator MBBI = MBB.begin();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  unsigned Align = MF.getTarget().getFrameInfo()->getStackAlignment();
  const Function* Fn = MF.getFunction();
  const X86Subtarget* Subtarget = &MF.getTarget().getSubtarget<X86Subtarget>();
  MachineInstr *MI;
  MachineModuleInfo *MMI = MFI->getMachineModuleInfo();

  // Prepare for frame info.
  unsigned FrameLabelId = 0, StartLabelId = 0;

  // Get the number of bytes to allocate from the FrameInfo
  uint64_t NumBytes = MFI->getStackSize();

  if (MMI && MMI->needsFrameInfo()) {
    // Mark function start
    StartLabelId = MMI->NextLabelID();
    BuildMI(MBB, MBBI, TII.get(X86::LABEL)).addImm(StartLabelId);
  }

  if (NumBytes) {   // adjust stack pointer: ESP -= numbytes
    if (NumBytes >= 4096 && Subtarget->isTargetCygMing()) {
      // Check whether EAX is live-in for this function
      bool isEAXAlive = false;
      for (MachineFunction::livein_iterator II = MF.livein_begin(),
           EE = MF.livein_end(); (II != EE) && !isEAXAlive; ++II) {
        unsigned Reg = II->first;
        isEAXAlive = (Reg == X86::EAX || Reg == X86::AX ||
                      Reg == X86::AH || Reg == X86::AL);
      }

      // Function prologue calls _alloca to probe the stack when allocating
      // more than 4k bytes in one go. Touching the stack at 4K increments is
      // necessary to ensure that the guard pages used by the OS virtual
      // memory manager are allocated in correct sequence.
      if (!isEAXAlive) {
        MI = BuildMI(TII.get(X86::MOV32ri), X86::EAX).addImm(NumBytes);
        MBB.insert(MBBI, MI);
        MI = BuildMI(TII.get(X86::CALLpcrel32)).addExternalSymbol("_alloca");
        MBB.insert(MBBI, MI);
      } else {
        // Save EAX
        MI = BuildMI(TII.get(X86::PUSH32r), X86::EAX);
        MBB.insert(MBBI, MI);
        // Allocate NumBytes-4 bytes on stack. We'll also use 4 already
        // allocated bytes for EAX.
        MI = BuildMI(TII.get(X86::MOV32ri), X86::EAX).addImm(NumBytes-4);
        MBB.insert(MBBI, MI);
        MI = BuildMI(TII.get(X86::CALLpcrel32)).addExternalSymbol("_alloca");
        MBB.insert(MBBI, MI);
        // Restore EAX
        MI = addRegOffset(BuildMI(TII.get(X86::MOV32rm), X86::EAX),
                          StackPtr, NumBytes-4);
        MBB.insert(MBBI, MI);
      }
    } else {
      emitSPUpdate(MBB, MBBI, StackPtr, -(int64_t)NumBytes, Is64Bit, TII);
    }
  }
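
  // Illustrative (not part of the original file): for NumBytes = 8192 on a
  // Cygwin/MinGW target with EAX dead on entry, the sequence built above is
  // roughly
  //
  //   movl $8192, %eax
  //   call _alloca
  //
  // so each 4K page is touched in order and no OS guard page is skipped.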

  if (MMI && MMI->needsFrameInfo()) {
    // Mark effective beginning of when frame pointer becomes valid.
    FrameLabelId = MMI->NextLabelID();
    BuildMI(MBB, MBBI, TII.get(X86::LABEL)).addImm(FrameLabelId);
  }

  if (hasFP(MF)) {
    // Get the offset of the stack slot for the EBP register... which is
    // guaranteed to be the last slot by processFunctionBeforeFrameFinalized.
    int64_t EBPOffset =
      MFI->getObjectOffset(MFI->getObjectIndexBegin())+SlotSize;
    // Update the frame offset adjustment.
    MFI->setOffsetAdjustment(SlotSize-NumBytes);

    // Save EBP into the appropriate stack slot...
    // mov [ESP-<offset>], EBP
    MI = addRegOffset(BuildMI(TII.get(Is64Bit ? X86::MOV64mr : X86::MOV32mr)),
                      StackPtr, EBPOffset+NumBytes).addReg(FramePtr);
    MBB.insert(MBBI, MI);

    // Update EBP with the new base value...
    if (NumBytes == SlotSize)    // mov EBP, ESP
      MI = BuildMI(TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr), FramePtr).
        addReg(StackPtr);
    else                         // lea EBP, [ESP+StackSize]
      MI = addRegOffset(BuildMI(TII.get(Is64Bit ? X86::LEA64r : X86::LEA32r),
                                FramePtr), StackPtr, NumBytes-SlotSize);

    MBB.insert(MBBI, MI);
  }

  if (MMI && MMI->needsFrameInfo()) {
    std::vector<MachineMove> &Moves = MMI->getFrameMoves();
    const TargetAsmInfo *TAI = MF.getTarget().getTargetAsmInfo();

    // Calculate amount of bytes used for return address storing
    int stackGrowth =
      (MF.getTarget().getFrameInfo()->getStackGrowthDirection() ==
       TargetFrameInfo::StackGrowsUp ?
       TAI->getAddressSize() : -TAI->getAddressSize());

    if (NumBytes) {
      // Show update of SP.
      if (hasFP(MF)) {
        // Adjust SP
        MachineLocation SPDst(MachineLocation::VirtualFP);
        MachineLocation SPSrc(MachineLocation::VirtualFP, 2*stackGrowth);
        Moves.push_back(MachineMove(FrameLabelId, SPDst, SPSrc));
      } else {
        MachineLocation SPDst(MachineLocation::VirtualFP);
        MachineLocation SPSrc(MachineLocation::VirtualFP,
                              -NumBytes+stackGrowth);
        Moves.push_back(MachineMove(FrameLabelId, SPDst, SPSrc));
      }
    } else {
      //FIXME: Verify & implement for FP
      MachineLocation SPDst(StackPtr);
      MachineLocation SPSrc(StackPtr, stackGrowth);
      Moves.push_back(MachineMove(FrameLabelId, SPDst, SPSrc));
    }

    // Add callee saved registers to move list.
    const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
    for (unsigned I = 0, E = CSI.size(); I != E; ++I) {
      int64_t Offset = MFI->getObjectOffset(CSI[I].getFrameIdx());
      unsigned Reg = CSI[I].getReg();
      MachineLocation CSDst(MachineLocation::VirtualFP, Offset);
      MachineLocation CSSrc(Reg);
      Moves.push_back(MachineMove(FrameLabelId, CSDst, CSSrc));
    }

    // Mark effective beginning of when frame pointer is ready.
    unsigned ReadyLabelId = MMI->NextLabelID();
    BuildMI(MBB, MBBI, TII.get(X86::LABEL)).addImm(ReadyLabelId);

    if (hasFP(MF)) {
      // Save FP
      MachineLocation FPDst(MachineLocation::VirtualFP, 2*stackGrowth);
      MachineLocation FPSrc(FramePtr);
      Moves.push_back(MachineMove(ReadyLabelId, FPDst, FPSrc));
    }

    MachineLocation FPDst(hasFP(MF) ? FramePtr : StackPtr);
    MachineLocation FPSrc(MachineLocation::VirtualFP);
    Moves.push_back(MachineMove(ReadyLabelId, FPDst, FPSrc));
  }

  // If it's main() on Cygwin/Mingw32 we should align stack as well
  if (Fn->hasExternalLinkage() && Fn->getName() == "main" &&
      Subtarget->isTargetCygMing()) {
    MI= BuildMI(TII.get(X86::AND32ri), X86::ESP)
                .addReg(X86::ESP).addImm(-Align);
    MBB.insert(MBBI, MI);

    // Probe the stack
    MI = BuildMI(TII.get(X86::MOV32ri), X86::EAX).addImm(Align);
    MBB.insert(MBBI, MI);
    MI = BuildMI(TII.get(X86::CALLpcrel32)).addExternalSymbol("_alloca");
    MBB.insert(MBBI, MI);
  }
}

void X86RegisterInfo::emitEpilogue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineBasicBlock::iterator MBBI = prior(MBB.end());

  switch (MBBI->getOpcode()) {
  case X86::RET:
  case X86::RETI:
  case X86::TAILJMPd:
  case X86::TAILJMPr:
  case X86::TAILJMPm: break;  // These are ok
  default:
    assert(0 && "Can only insert epilog into returning blocks");
  }

  if (hasFP(MF)) {
    // mov ESP, EBP
    BuildMI(MBB, MBBI, TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
            StackPtr).addReg(FramePtr);

    // pop EBP
    BuildMI(MBB, MBBI, TII.get(Is64Bit ? X86::POP64r : X86::POP32r), FramePtr);
  } else {
    // Get the number of bytes allocated from the FrameInfo.
    uint64_t NumBytes = MFI->getStackSize();

    if (NumBytes) {   // adjust stack pointer back: ESP += numbytes
      // If there is an ADD32ri or SUB32ri of ESP immediately before this
      // instruction, merge the two instructions.
      if (MBBI != MBB.begin()) {
        MachineBasicBlock::iterator PI = prior(MBBI);
        unsigned Opc = PI->getOpcode();
        if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
             Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
            PI->getOperand(0).getReg() == StackPtr) {
          NumBytes += PI->getOperand(2).getImm();
          MBB.erase(PI);
        } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
                    Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
                   PI->getOperand(0).getReg() == StackPtr) {
          NumBytes -= PI->getOperand(2).getImm();
          MBB.erase(PI);
        }
      }

      if (NumBytes)
        emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, TII);
    }
  }
}
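
// Illustrative example (not part of the original file) of the merge above:
// with frame pointer elimination, a block ending in
//
//   addl $12, %esp
//   ret
//
// has the ADD folded into the epilogue's own adjustment (NumBytes += 12, the
// ADD is erased), so a single ESP update is emitted before the RET.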

unsigned X86RegisterInfo::getRARegister() const {
  if (Is64Bit)
    return X86::RIP;  // Should have dwarf #16
  else
    return X86::EIP;  // Should have dwarf #8
}

unsigned X86RegisterInfo::getFrameRegister(MachineFunction &MF) const {
  return hasFP(MF) ? FramePtr : StackPtr;
}

void X86RegisterInfo::getInitialFrameState(std::vector<MachineMove> &Moves)
  const {
  // Calculate amount of bytes used for return address storing
  int stackGrowth = (Is64Bit ? -8 : -4);

  // Initial state of the frame pointer is esp+4.
  MachineLocation Dst(MachineLocation::VirtualFP);
  MachineLocation Src(StackPtr, stackGrowth);
  Moves.push_back(MachineMove(0, Dst, Src));

  // Add return address to move list
  MachineLocation CSDst(StackPtr, stackGrowth);
  MachineLocation CSSrc(getRARegister());
  Moves.push_back(MachineMove(0, CSDst, CSSrc));
}

unsigned X86RegisterInfo::getEHExceptionRegister() const {
  assert(0 && "What is the exception register");
  return 0;
}

unsigned X86RegisterInfo::getEHHandlerRegister() const {
  assert(0 && "What is the exception handler register");
  return 0;
}

namespace llvm {
unsigned getX86SubSuperRegister(unsigned Reg, MVT::ValueType VT, bool High) {
  switch (VT) {
  default: return Reg;
  case MVT::i8:
    if (High) {
      switch (Reg) {
      default: return 0;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AH;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DH;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CH;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BH;
      }
    } else {
      switch (Reg) {
      default: return 0;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AL;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DL;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CL;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BL;
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SIL;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DIL;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BPL;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SPL;
      case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
        return X86::R8B;
      case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
        return X86::R9B;
      case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
        return X86::R10B;
      case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
        return X86::R11B;
      case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
        return X86::R12B;
      case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
        return X86::R13B;
      case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
        return X86::R14B;
      case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
        return X86::R15B;
      }
    }
  case MVT::i16:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::AX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::DX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::CX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::BX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::SI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::DI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::BP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::SP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8W;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9W;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10W;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11W;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12W;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13W;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14W;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15W;
    }
  case MVT::i32:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::EAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::EDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::ECX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::EBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::ESI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::EDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::EBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::ESP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8D;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9D;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10D;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11D;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12D;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13D;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14D;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15D;
    }
  case MVT::i64:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::RAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::RDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::RCX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::RBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::RSI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::RDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::RBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::RSP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15;
    }
  }

  return Reg;
}
}
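
// Illustrative usage (not part of the original file):
//
//   getX86SubSuperRegister(X86::EAX, MVT::i8, true)   // == X86::AH
//   getX86SubSuperRegister(X86::AL,  MVT::i64, false) // == X86::RAX
//   getX86SubSuperRegister(X86::R8,  MVT::i16, false) // == X86::R8W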

#include "X86GenRegisterInfo.inc"