// X86MCCodeEmitter.cpp — revision 31d157ae1ac2cd9c787dc3c1d28e64c682803844
//===-- X86MCCodeEmitter.cpp - Convert X86 code to machine code -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the X86MCCodeEmitter class.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "mccodeemitter"
#include "MCTargetDesc/X86MCTargetDesc.h"
#include "MCTargetDesc/X86BaseInfo.h"
#include "MCTargetDesc/X86FixupKinds.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

namespace {
/// X86MCCodeEmitter - Emits raw x86 machine code bytes (plus relocation
/// fixups) for an MCInst into a raw_ostream.  All Emit* helpers thread a
/// CurByte counter through so fixup offsets can be recorded relative to the
/// start of the instruction.
class X86MCCodeEmitter : public MCCodeEmitter {
  X86MCCodeEmitter(const X86MCCodeEmitter &); // DO NOT IMPLEMENT
  void operator=(const X86MCCodeEmitter &); // DO NOT IMPLEMENT
  const MCInstrInfo &MCII;
  const MCSubtargetInfo &STI;
  MCContext &Ctx;
public:
  X86MCCodeEmitter(const MCInstrInfo &mcii, const MCSubtargetInfo &sti,
                   MCContext &ctx)
    : MCII(mcii), STI(sti), Ctx(ctx) {
  }

  ~X86MCCodeEmitter() {}

  /// is64BitMode - Return true if the subtarget is encoding in 64-bit mode.
  /// Several prefixes (REX, address-size 0x67) depend on this.
  bool is64BitMode() const {
    // FIXME: Can tablegen auto-generate this?
    return (STI.getFeatureBits() & X86::Mode64Bit) != 0;
  }

  /// GetX86RegNum - Return the low-3-bit x86 encoding of the register
  /// operand (the extension bit, if any, is carried by REX/VEX).
  static unsigned GetX86RegNum(const MCOperand &MO) {
    return X86_MC::getX86RegNum(MO.getReg());
  }

  // On regular x86, both XMM0-XMM7 and XMM8-XMM15 are encoded in the range
  // 0-7 and the difference between the 2 groups is given by the REX prefix.
  // In the VEX prefix, registers are seen sequentially from 0-15 and encoded
  // in 1's complement form, example:
  //
  //  ModRM field => XMM9 => 1
  //  VEX.VVVV    => XMM9 => ~9
  //
  //  See table 4-35 of Intel AVX Programming Reference for details.
  static unsigned char getVEXRegisterEncoding(const MCInst &MI,
                                              unsigned OpNum) {
    unsigned SrcReg = MI.getOperand(OpNum).getReg();
    unsigned SrcRegNum = GetX86RegNum(MI.getOperand(OpNum));
    // Fold the REX-style extension bit back in so the full 0-15 register
    // number is available before complementing.
    if (X86II::isX86_64ExtendedReg(SrcReg))
      SrcRegNum |= 8;

    // The registers represented through VEX_VVVV should
    // be encoded in 1's complement form.
    return (~SrcRegNum) & 0xf;
  }

  /// EmitByte - Emit one byte to OS and advance the current-byte counter.
  void EmitByte(unsigned char C, unsigned &CurByte, raw_ostream &OS) const {
    OS << (char)C;
    ++CurByte;
  }

  /// EmitConstant - Emit a Size-byte constant.
  void EmitConstant(uint64_t Val, unsigned Size, unsigned &CurByte,
                    raw_ostream &OS) const {
    // Output the constant in little endian byte order.
    for (unsigned i = 0; i != Size; ++i) {
      EmitByte(Val & 255, CurByte, OS);
      Val >>= 8;
    }
  }

  void EmitImmediate(const MCOperand &Disp, SMLoc Loc,
                     unsigned ImmSize, MCFixupKind FixupKind,
                     unsigned &CurByte, raw_ostream &OS,
                     SmallVectorImpl<MCFixup> &Fixups,
                     int ImmOffset = 0) const;

  /// ModRMByte - Assemble a ModR/M byte: mod in bits 7-6, reg/opcode in
  /// bits 5-3, r/m in bits 2-0.
  inline static unsigned char ModRMByte(unsigned Mod, unsigned RegOpcode,
                                        unsigned RM) {
    assert(Mod < 4 && RegOpcode < 8 && RM < 8 && "ModRM Fields out of range!");
    return RM | (RegOpcode << 3) | (Mod << 6);
  }

  /// EmitRegModRMByte - Emit a register-direct ModR/M byte (mod == 3).
  void EmitRegModRMByte(const MCOperand &ModRMReg, unsigned RegOpcodeFld,
                        unsigned &CurByte, raw_ostream &OS) const {
    EmitByte(ModRMByte(3, RegOpcodeFld, GetX86RegNum(ModRMReg)), CurByte, OS);
  }

  /// EmitSIBByte - Emit a scale-index-base byte.
  void EmitSIBByte(unsigned SS, unsigned Index, unsigned Base,
                   unsigned &CurByte, raw_ostream &OS) const {
    // SIB byte is in the same format as the ModRMByte.
    EmitByte(ModRMByte(SS, Index, Base), CurByte, OS);
  }


  void EmitMemModRMByte(const MCInst &MI, unsigned Op,
                        unsigned RegOpcodeField,
                        uint64_t TSFlags, unsigned &CurByte, raw_ostream &OS,
                        SmallVectorImpl<MCFixup> &Fixups) const;

  void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
                         SmallVectorImpl<MCFixup> &Fixups) const;

  void EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, int MemOperand,
                           const MCInst &MI, const MCInstrDesc &Desc,
                           raw_ostream &OS) const;

  void EmitSegmentOverridePrefix(uint64_t TSFlags, unsigned &CurByte,
                                 int MemOperand, const MCInst &MI,
                                 raw_ostream &OS) const;

  void EmitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, int MemOperand,
                        const MCInst &MI, const MCInstrDesc &Desc,
                        raw_ostream &OS) const;
};

} // end anonymous namespace


MCCodeEmitter *llvm::createX86MCCodeEmitter(const MCInstrInfo &MCII,
                                            const MCSubtargetInfo &STI,
                                            MCContext &Ctx) {
  return new X86MCCodeEmitter(MCII, STI, Ctx);
}

/// isDisp8 - Return true if this signed displacement fits in a 8-bit
/// sign-extended field.
static bool isDisp8(int Value) {
  return Value == (signed char)Value;
}

/// getImmFixupKind - Return the appropriate fixup kind to use for an immediate
/// in an instruction with the specified TSFlags.
static MCFixupKind getImmFixupKind(uint64_t TSFlags) {
  unsigned Size = X86II::getSizeOfImm(TSFlags);
  bool isPCRel = X86II::isImmPCRel(TSFlags);

  return MCFixup::getKindForSize(Size, isPCRel);
}

/// Is32BitMemOperand - Return true if the specified instruction with a memory
/// operand should emit the 0x67 prefix byte in 64-bit mode due to a 32-bit
/// memory operand. Op specifies the operand # of the memoperand.
static bool Is32BitMemOperand(const MCInst &MI, unsigned Op) {
  const MCOperand &BaseReg  = MI.getOperand(Op+X86::AddrBaseReg);
  const MCOperand &IndexReg = MI.getOperand(Op+X86::AddrIndexReg);

  // A 32-bit base or index register forces the address-size override.
  if ((BaseReg.getReg() != 0 &&
       X86MCRegisterClasses[X86::GR32RegClassID].contains(BaseReg.getReg())) ||
      (IndexReg.getReg() != 0 &&
       X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg.getReg())))
    return true;
  return false;
}

/// StartsWithGlobalOffsetTable - Check if this expression starts with
/// _GLOBAL_OFFSET_TABLE_ and if it is of the form
/// _GLOBAL_OFFSET_TABLE_-symbol. This is needed to support PIC on ELF
/// i386 as _GLOBAL_OFFSET_TABLE_ is magical. We check only simple cases that
/// are known to be used: _GLOBAL_OFFSET_TABLE_ by itself or at the start
/// of a binary expression.
enum GlobalOffsetTableExprKind {
  GOT_None,    // Expression does not reference _GLOBAL_OFFSET_TABLE_.
  GOT_Normal,  // _GLOBAL_OFFSET_TABLE_ by itself (or +constant).
  GOT_SymDiff  // _GLOBAL_OFFSET_TABLE_ - symbol.
};
static GlobalOffsetTableExprKind
StartsWithGlobalOffsetTable(const MCExpr *Expr) {
  const MCExpr *RHS = 0;
  if (Expr->getKind() == MCExpr::Binary) {
    const MCBinaryExpr *BE = static_cast<const MCBinaryExpr *>(Expr);
    Expr = BE->getLHS();
    RHS = BE->getRHS();
  }

  if (Expr->getKind() != MCExpr::SymbolRef)
    return GOT_None;

  const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr*>(Expr);
  const MCSymbol &S = Ref->getSymbol();
  if (S.getName() != "_GLOBAL_OFFSET_TABLE_")
    return GOT_None;
  if (RHS && RHS->getKind() == MCExpr::SymbolRef)
    return GOT_SymDiff;
  return GOT_Normal;
}

/// EmitImmediate - Emit a Size-byte immediate/displacement.  Plain integer
/// immediates are written directly; anything needing relocation is recorded
/// as an MCFixup at the current byte offset and backed by Size zero bytes.
/// ImmOffset is an extra addend folded into the emitted value/expression
/// (used e.g. to bias rip-relative displacements).
void X86MCCodeEmitter::
EmitImmediate(const MCOperand &DispOp, SMLoc Loc, unsigned Size,
              MCFixupKind FixupKind, unsigned &CurByte, raw_ostream &OS,
              SmallVectorImpl<MCFixup> &Fixups, int ImmOffset) const {
  const MCExpr *Expr = NULL;
  if (DispOp.isImm()) {
    // If this is a simple integer displacement that doesn't require a
    // relocation, emit it now.
    if (FixupKind != FK_PCRel_1 &&
        FixupKind != FK_PCRel_2 &&
        FixupKind != FK_PCRel_4) {
      EmitConstant(DispOp.getImm()+ImmOffset, Size, CurByte, OS);
      return;
    }
    // PC-relative immediates always go through a fixup so the offset bias
    // below is applied uniformly.
    Expr = MCConstantExpr::Create(DispOp.getImm(), Ctx);
  } else {
    Expr = DispOp.getExpr();
  }

  // If we have an immoffset, add it to the expression.
  if ((FixupKind == FK_Data_4 ||
       FixupKind == FK_Data_8 ||
       FixupKind == MCFixupKind(X86::reloc_signed_4byte))) {
    GlobalOffsetTableExprKind Kind = StartsWithGlobalOffsetTable(Expr);
    if (Kind != GOT_None) {
      assert(ImmOffset == 0);

      FixupKind = MCFixupKind(X86::reloc_global_offset_table);
      if (Kind == GOT_Normal)
        ImmOffset = CurByte;
    } else if (Expr->getKind() == MCExpr::SymbolRef) {
      const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr*>(Expr);
      if (Ref->getKind() == MCSymbolRefExpr::VK_SECREL) {
        // COFF section-relative relocation (e.g. debug info on Windows).
        FixupKind = MCFixupKind(FK_SecRel_4);
      }
    }
  }

  // If the fixup is pc-relative, we need to bias the value to be relative to
  // the start of the field, not the end of the field.
  if (FixupKind == FK_PCRel_4 ||
      FixupKind == MCFixupKind(X86::reloc_riprel_4byte) ||
      FixupKind == MCFixupKind(X86::reloc_riprel_4byte_movq_load))
    ImmOffset -= 4;
  if (FixupKind == FK_PCRel_2)
    ImmOffset -= 2;
  if (FixupKind == FK_PCRel_1)
    ImmOffset -= 1;

  if (ImmOffset)
    Expr = MCBinaryExpr::CreateAdd(Expr, MCConstantExpr::Create(ImmOffset, Ctx),
                                   Ctx);

  // Emit a symbolic constant as a fixup and 4 zeros.
  Fixups.push_back(MCFixup::Create(CurByte, Expr, FixupKind, Loc));
  EmitConstant(0, Size, CurByte, OS);
}

/// EmitMemModRMByte - Emit the ModR/M byte (and SIB byte plus displacement
/// if required) for the memory operand starting at operand index Op.
/// RegOpcodeField supplies the reg/opcode bits of the ModR/M byte.
void X86MCCodeEmitter::EmitMemModRMByte(const MCInst &MI, unsigned Op,
                                        unsigned RegOpcodeField,
                                        uint64_t TSFlags, unsigned &CurByte,
                                        raw_ostream &OS,
                                        SmallVectorImpl<MCFixup> &Fixups) const{
  const MCOperand &Disp     = MI.getOperand(Op+X86::AddrDisp);
  const MCOperand &Base     = MI.getOperand(Op+X86::AddrBaseReg);
  const MCOperand &Scale    = MI.getOperand(Op+X86::AddrScaleAmt);
  const MCOperand &IndexReg = MI.getOperand(Op+X86::AddrIndexReg);
  unsigned BaseReg = Base.getReg();

  // Handle %rip relative addressing.
  if (BaseReg == X86::RIP) {    // [disp32+RIP] in X86-64 mode
    assert(is64BitMode() && "Rip-relative addressing requires 64-bit mode");
    assert(IndexReg.getReg() == 0 && "Invalid rip-relative address");
    // mod=0, r/m=5 in 64-bit mode means [RIP+disp32].
    EmitByte(ModRMByte(0, RegOpcodeField, 5), CurByte, OS);

    unsigned FixupKind = X86::reloc_riprel_4byte;

    // movq loads are handled with a special relocation form which allows the
    // linker to eliminate some loads for GOT references which end up in the
    // same linkage unit.
    if (MI.getOpcode() == X86::MOV64rm)
      FixupKind = X86::reloc_riprel_4byte_movq_load;

    // rip-relative addressing is actually relative to the *next* instruction.
    // Since an immediate can follow the mod/rm byte for an instruction, this
    // means that we need to bias the immediate field of the instruction with
    // the size of the immediate field.  If we have this case, add it into the
    // expression to emit.
    int ImmSize = X86II::hasImm(TSFlags) ? X86II::getSizeOfImm(TSFlags) : 0;

    EmitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(FixupKind),
                  CurByte, OS, Fixups, -ImmSize);
    return;
  }

  unsigned BaseRegNo = BaseReg ? GetX86RegNum(Base) : -1U;

  // Determine whether a SIB byte is needed.
  // If no BaseReg, issue a RIP relative instruction only if the MCE can
  // resolve addresses on-the-fly, otherwise use SIB (Intel Manual 2A, table
  // 2-7) and absolute references.

  if (// The SIB byte must be used if there is an index register.
      IndexReg.getReg() == 0 &&
      // The SIB byte must be used if the base is ESP/RSP/R12, all of which
      // encode to an R/M value of 4, which indicates that a SIB byte is
      // present.
      BaseRegNo != N86::ESP &&
      // If there is no base register and we're in 64-bit mode, we need a SIB
      // byte to emit an addr that is just 'disp32' (the non-RIP relative form).
      (!is64BitMode() || BaseReg != 0)) {

    if (BaseReg == 0) {          // [disp32]     in X86-32 mode
      EmitByte(ModRMByte(0, RegOpcodeField, 5), CurByte, OS);
      EmitImmediate(Disp, MI.getLoc(), 4, FK_Data_4, CurByte, OS, Fixups);
      return;
    }

    // If the base is not EBP/ESP and there is no displacement, use simple
    // indirect register encoding, this handles addresses like [EAX].  The
    // encoding for [EBP] with no displacement means [disp32] so we handle it
    // by emitting a displacement of 0 below.
    if (Disp.isImm() && Disp.getImm() == 0 && BaseRegNo != N86::EBP) {
      EmitByte(ModRMByte(0, RegOpcodeField, BaseRegNo), CurByte, OS);
      return;
    }

    // Otherwise, if the displacement fits in a byte, encode as [REG+disp8].
    if (Disp.isImm() && isDisp8(Disp.getImm())) {
      EmitByte(ModRMByte(1, RegOpcodeField, BaseRegNo), CurByte, OS);
      EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups);
      return;
    }

    // Otherwise, emit the most general non-SIB encoding: [REG+disp32]
    EmitByte(ModRMByte(2, RegOpcodeField, BaseRegNo), CurByte, OS);
    EmitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(X86::reloc_signed_4byte), CurByte, OS,
                  Fixups);
    return;
  }

  // We need a SIB byte, so start by outputting the ModR/M byte first
  assert(IndexReg.getReg() != X86::ESP &&
         IndexReg.getReg() != X86::RSP && "Cannot use ESP as index reg!");

  bool ForceDisp32 = false;
  bool ForceDisp8  = false;
  if (BaseReg == 0) {
    // If there is no base register, we emit the special case SIB byte with
    // MOD=0, BASE=5, to JUST get the index, scale, and displacement.
    EmitByte(ModRMByte(0, RegOpcodeField, 4), CurByte, OS);
    ForceDisp32 = true;
  } else if (!Disp.isImm()) {
    // Emit the normal disp32 encoding.
    // A symbolic displacement can't be size-checked, so always use 4 bytes.
    EmitByte(ModRMByte(2, RegOpcodeField, 4), CurByte, OS);
    ForceDisp32 = true;
  } else if (Disp.getImm() == 0 &&
             // Base reg can't be anything that ends up with '5' as the base
             // reg, it is the magic [*] nomenclature that indicates no base.
             BaseRegNo != N86::EBP) {
    // Emit no displacement ModR/M byte
    EmitByte(ModRMByte(0, RegOpcodeField, 4), CurByte, OS);
  } else if (isDisp8(Disp.getImm())) {
    // Emit the disp8 encoding.
    EmitByte(ModRMByte(1, RegOpcodeField, 4), CurByte, OS);
    ForceDisp8 = true;           // Make sure to force 8 bit disp if Base=EBP
  } else {
    // Emit the normal disp32 encoding.
    EmitByte(ModRMByte(2, RegOpcodeField, 4), CurByte, OS);
  }

  // Calculate what the SS field value should be...
  // Maps scale amount (1/2/4/8) to the 2-bit SS encoding; other indices are
  // invalid and map to ~0U.
  static const unsigned SSTable[] = { ~0U, 0, 1, ~0U, 2, ~0U, ~0U, ~0U, 3 };
  unsigned SS = SSTable[Scale.getImm()];

  if (BaseReg == 0) {
    // Handle the SIB byte for the case where there is no base, see Intel
    // Manual 2A, table 2-7. The displacement has already been output.
    unsigned IndexRegNo;
    if (IndexReg.getReg())
      IndexRegNo = GetX86RegNum(IndexReg);
    else // Examples: [ESP+1*<noreg>+4] or [scaled idx]+disp32 (MOD=0,BASE=5)
      IndexRegNo = 4;
    EmitSIBByte(SS, IndexRegNo, 5, CurByte, OS);
  } else {
    unsigned IndexRegNo;
    if (IndexReg.getReg())
      IndexRegNo = GetX86RegNum(IndexReg);
    else
      IndexRegNo = 4;   // For example [ESP+1*<noreg>+4]
    EmitSIBByte(SS, IndexRegNo, GetX86RegNum(Base), CurByte, OS);
  }

  // Do we need to output a displacement?
  if (ForceDisp8)
    EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups);
  else if (ForceDisp32 || Disp.getImm() != 0)
    EmitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(X86::reloc_signed_4byte),
                  CurByte, OS, Fixups);
}

/// EmitVEXOpcodePrefix - AVX instructions are encoded using a opcode prefix
/// called VEX.
void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
                                           int MemOperand, const MCInst &MI,
                                           const MCInstrDesc &Desc,
                                           raw_ostream &OS) const {
  bool HasVEX_4V = (TSFlags >> X86II::VEXShift) & X86II::VEX_4V;
  bool HasVEX_4VOp3 = (TSFlags >> X86II::VEXShift) & X86II::VEX_4VOp3;

  // VEX_R: opcode extension equivalent to REX.R in
  // 1's complement (inverted) form
  //
  //  1: Same as REX_R=0 (must be 1 in 32-bit mode)
  //  0: Same as REX_R=1 (64 bit mode only)
  //
  unsigned char VEX_R = 0x1;

  // VEX_X: equivalent to REX.X, only used when a
  // register is used for index in SIB Byte.
  //
  //  1: Same as REX.X=0 (must be 1 in 32-bit mode)
  //  0: Same as REX.X=1 (64-bit mode only)
  unsigned char VEX_X = 0x1;

  // VEX_B:
  //
  //  1: Same as REX_B=0 (ignored in 32-bit mode)
  //  0: Same as REX_B=1 (64 bit mode only)
  //
  unsigned char VEX_B = 0x1;

  // VEX_W: opcode specific (use like REX.W, or used for
  // opcode extension, or ignored, depending on the opcode byte)
  unsigned char VEX_W = 0;

  // XOP: Use XOP prefix byte 0x8f instead of VEX.
  unsigned char XOP = 0;

  // VEX_5M (VEX m-mmmmm field):
  //
  //  0b00000: Reserved for future use
  //  0b00001: implied 0F leading opcode
  //  0b00010: implied 0F 38 leading opcode bytes
  //  0b00011: implied 0F 3A leading opcode bytes
  //  0b00100-0b11111: Reserved for future use
  //  0b01000: XOP map select - 08h instructions with imm byte
  //  0b01001: XOP map select - 09h instructions with no imm byte
  unsigned char VEX_5M = 0x1;

  // VEX_4V (VEX vvvv field): a register specifier
  // (in 1's complement form) or 1111 if unused.
  unsigned char VEX_4V = 0xf;

  // VEX_L (Vector Length):
  //
  //  0: scalar or 128-bit vector
  //  1: 256-bit vector
  //
  unsigned char VEX_L = 0;

  // VEX_PP: opcode extension providing equivalent
  // functionality of a SIMD prefix
  //
  //  0b00: None
  //  0b01: 66
  //  0b10: F3
  //  0b11: F2
  //
  unsigned char VEX_PP = 0;

  // Encode the operand size opcode prefix as needed.
  if (TSFlags & X86II::OpSize)
    VEX_PP = 0x01;

  if ((TSFlags >> X86II::VEXShift) & X86II::VEX_W)
    VEX_W = 1;

  if ((TSFlags >> X86II::VEXShift) & X86II::XOP)
    XOP = 1;

  if ((TSFlags >> X86II::VEXShift) & X86II::VEX_L)
    VEX_L = 1;

  // Fold the legacy opcode-map / mandatory-prefix flags into m-mmmmm and pp.
  switch (TSFlags & X86II::Op0Mask) {
  default: llvm_unreachable("Invalid prefix!");
  case X86II::T8:  // 0F 38
    VEX_5M = 0x2;
    break;
  case X86II::TA:  // 0F 3A
    VEX_5M = 0x3;
    break;
  case X86II::T8XS: // F3 0F 38
    VEX_PP = 0x2;
    VEX_5M = 0x2;
    break;
  case X86II::T8XD: // F2 0F 38
    VEX_PP = 0x3;
    VEX_5M = 0x2;
    break;
  case X86II::TAXD: // F2 0F 3A
    VEX_PP = 0x3;
    VEX_5M = 0x3;
    break;
  case X86II::XS:  // F3 0F
    VEX_PP = 0x2;
    break;
  case X86II::XD:  // F2 0F
    VEX_PP = 0x3;
    break;
  case X86II::XOP8:
    VEX_5M = 0x8;
    break;
  case X86II::XOP9:
    VEX_5M = 0x9;
    break;
  case X86II::A6:  // Bypass: Not used by VEX
  case X86II::A7:  // Bypass: Not used by VEX
  case X86II::TB:  // Bypass: Not used by VEX
  case 0:
    break;  // No prefix!
  }


  // Set the vector length to 256-bit if YMM0-YMM15 is used
  for (unsigned i = 0; i != MI.getNumOperands(); ++i) {
    if (!MI.getOperand(i).isReg())
      continue;
    unsigned SrcReg = MI.getOperand(i).getReg();
    if (SrcReg >= X86::YMM0 && SrcReg <= X86::YMM15)
      VEX_L = 1;
  }

  // Classify VEX_B, VEX_4V, VEX_R, VEX_X
  unsigned CurOp = 0;
  switch (TSFlags & X86II::FormMask) {
  case X86II::MRMInitReg: llvm_unreachable("FIXME: Remove this!");
  case X86II::MRMDestMem: {
    // MRMDestMem instructions forms:
    //  MemAddr, src1(ModR/M)
    //  MemAddr, src1(VEX_4V), src2(ModR/M)
    //  MemAddr, src1(ModR/M), imm8
    //
    // NOTE(review): these index the memory operand from operand 0, not from
    // MemOperand — presumably MRMDestMem always places the memory operand
    // first; confirm against getMemoryOperandNo.
    if (X86II::isX86_64ExtendedReg(MI.getOperand(X86::AddrBaseReg).getReg()))
      VEX_B = 0x0;
    if (X86II::isX86_64ExtendedReg(MI.getOperand(X86::AddrIndexReg).getReg()))
      VEX_X = 0x0;

    CurOp = X86::AddrNumOperands;
    if (HasVEX_4V)
      VEX_4V = getVEXRegisterEncoding(MI, CurOp++);

    const MCOperand &MO = MI.getOperand(CurOp);
    if (MO.isReg() && X86II::isX86_64ExtendedReg(MO.getReg()))
      VEX_R = 0x0;
    break;
  }
  case X86II::MRMSrcMem:
    // MRMSrcMem instructions forms:
    //  src1(ModR/M), MemAddr
    //  src1(ModR/M), src2(VEX_4V), MemAddr
    //  src1(ModR/M), MemAddr, imm8
    //  src1(ModR/M), MemAddr, src2(VEX_I8IMM)
    //
    //  FMA4:
    //  dst(ModR/M.reg), src1(VEX_4V), src2(ModR/M), src3(VEX_I8IMM)
    //  dst(ModR/M.reg), src1(VEX_4V), src2(VEX_I8IMM), src3(ModR/M),
    if (X86II::isX86_64ExtendedReg(MI.getOperand(0).getReg()))
      VEX_R = 0x0;

    if (HasVEX_4V)
      VEX_4V = getVEXRegisterEncoding(MI, 1);

    if (X86II::isX86_64ExtendedReg(
               MI.getOperand(MemOperand+X86::AddrBaseReg).getReg()))
      VEX_B = 0x0;
    if (X86II::isX86_64ExtendedReg(
               MI.getOperand(MemOperand+X86::AddrIndexReg).getReg()))
      VEX_X = 0x0;

    if (HasVEX_4VOp3)
      VEX_4V = getVEXRegisterEncoding(MI, X86::AddrNumOperands+1);
    break;
  case X86II::MRM0m: case X86II::MRM1m:
  case X86II::MRM2m: case X86II::MRM3m:
  case X86II::MRM4m: case X86II::MRM5m:
  case X86II::MRM6m: case X86II::MRM7m: {
    // MRM[0-9]m instructions forms:
    //  MemAddr
    //  src1(VEX_4V), MemAddr
    if (HasVEX_4V)
      VEX_4V = getVEXRegisterEncoding(MI, 0);

    if (X86II::isX86_64ExtendedReg(
               MI.getOperand(MemOperand+X86::AddrBaseReg).getReg()))
      VEX_B = 0x0;
    if (X86II::isX86_64ExtendedReg(
               MI.getOperand(MemOperand+X86::AddrIndexReg).getReg()))
      VEX_X = 0x0;
    break;
  }
  case X86II::MRMSrcReg:
    // MRMSrcReg instructions forms:
    //  dst(ModR/M), src1(VEX_4V), src2(ModR/M), src3(VEX_I8IMM)
    //  dst(ModR/M), src1(ModR/M)
    //  dst(ModR/M), src1(ModR/M), imm8
    //
    if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
      VEX_R = 0x0;
    CurOp++;

    if (HasVEX_4V)
      VEX_4V = getVEXRegisterEncoding(MI, CurOp++);
    if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
      VEX_B = 0x0;
    CurOp++;
    if (HasVEX_4VOp3)
      VEX_4V = getVEXRegisterEncoding(MI, CurOp);
    break;
  case X86II::MRMDestReg:
    // MRMDestReg instructions forms:
    //  dst(ModR/M), src(ModR/M)
    //  dst(ModR/M), src(ModR/M), imm8
    if (X86II::isX86_64ExtendedReg(MI.getOperand(0).getReg()))
      VEX_B = 0x0;
    if (X86II::isX86_64ExtendedReg(MI.getOperand(1).getReg()))
      VEX_R = 0x0;
    break;
  case X86II::MRM0r: case X86II::MRM1r:
  case X86II::MRM2r: case X86II::MRM3r:
  case X86II::MRM4r: case X86II::MRM5r:
  case X86II::MRM6r: case X86II::MRM7r:
    // MRM0r-MRM7r instructions forms:
    //  dst(VEX_4V), src(ModR/M), imm8
    VEX_4V = getVEXRegisterEncoding(MI, 0);
    if (X86II::isX86_64ExtendedReg(MI.getOperand(1).getReg()))
      VEX_B = 0x0;
    break;
  default: // RawFrm
    break;
  }

  // Emit segment override opcode prefix as needed.
  EmitSegmentOverridePrefix(TSFlags, CurByte, MemOperand, MI, OS);

  // VEX opcode prefix can have 2 or 3 bytes
  //
  //  3 bytes:
  //    +-----+ +--------------+ +-------------------+
  //    | C4h | | RXB | m-mmmm | | W | vvvv | L | pp |
  //    +-----+ +--------------+ +-------------------+
  //  2 bytes:
  //    +-----+ +-------------------+
  //    | C5h | | R | vvvv | L | pp |
  //    +-----+ +-------------------+
  //
  unsigned char LastByte = VEX_PP | (VEX_L << 2) | (VEX_4V << 3);

  // The 2-byte form is only usable when X, B, W are at their defaults and
  // the implied map is 0F (m-mmmmm == 1); XOP always needs the 3-byte form.
  if (VEX_B && VEX_X && !VEX_W && !XOP && (VEX_5M == 1)) { // 2 byte VEX prefix
    EmitByte(0xC5, CurByte, OS);
    EmitByte(LastByte | (VEX_R << 7), CurByte, OS);
    return;
  }

  // 3 byte VEX prefix
  EmitByte(XOP ? 0x8F : 0xC4, CurByte, OS);
  EmitByte(VEX_R << 7 | VEX_X << 6 | VEX_B << 5 | VEX_5M, CurByte, OS);
  EmitByte(LastByte | (VEX_W << 7), CurByte, OS);
}

/// DetermineREXPrefix - Determine if the MCInst has to be encoded with a X86-64
/// REX prefix which specifies 1) 64-bit instructions, 2) non-default operand
/// size, and 3) use of X86-64 extended registers.
static unsigned DetermineREXPrefix(const MCInst &MI, uint64_t TSFlags,
                                   const MCInstrDesc &Desc) {
  unsigned REX = 0;
  if (TSFlags & X86II::REX_W)
    REX |= 1 << 3; // set REX.W

  if (MI.getNumOperands() == 0) return REX;

  unsigned NumOps = MI.getNumOperands();
  // FIXME: MCInst should explicitize the two-addrness.
  bool isTwoAddr = NumOps > 1 &&
                      Desc.getOperandConstraint(1, MCOI::TIED_TO) != -1;

  // If it accesses SPL, BPL, SIL, or DIL, then it requires a 0x40 REX prefix.
  unsigned i = isTwoAddr ? 1 : 0;
  for (; i != NumOps; ++i) {
    const MCOperand &MO = MI.getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (!X86II::isX86_64NonExtLowByteReg(Reg)) continue;
    // FIXME: The caller of DetermineREXPrefix slaps this prefix onto anything
    // that returns non-zero.
    REX |= 0x40; // REX fixed encoding prefix
    break;
  }

  // Now set R/X/B according to where extended registers appear for this
  // instruction form.
  switch (TSFlags & X86II::FormMask) {
  case X86II::MRMInitReg: llvm_unreachable("FIXME: Remove this!");
  case X86II::MRMSrcReg:
    if (MI.getOperand(0).isReg() &&
        X86II::isX86_64ExtendedReg(MI.getOperand(0).getReg()))
      REX |= 1 << 2; // set REX.R
    i = isTwoAddr ? 2 : 1;
    for (; i != NumOps; ++i) {
      const MCOperand &MO = MI.getOperand(i);
      if (MO.isReg() && X86II::isX86_64ExtendedReg(MO.getReg()))
        REX |= 1 << 0; // set REX.B
    }
    break;
  case X86II::MRMSrcMem: {
    if (MI.getOperand(0).isReg() &&
        X86II::isX86_64ExtendedReg(MI.getOperand(0).getReg()))
      REX |= 1 << 2; // set REX.R
    unsigned Bit = 0;
    i = isTwoAddr ? 2 : 1;
    for (; i != NumOps; ++i) {
      const MCOperand &MO = MI.getOperand(i);
      if (MO.isReg()) {
        if (X86II::isX86_64ExtendedReg(MO.getReg()))
          REX |= 1 << Bit; // set REX.B (Bit=0) and REX.X (Bit=1)
        Bit++;
      }
    }
    break;
  }
  case X86II::MRM0m: case X86II::MRM1m:
  case X86II::MRM2m: case X86II::MRM3m:
  case X86II::MRM4m: case X86II::MRM5m:
  case X86II::MRM6m: case X86II::MRM7m:
  case X86II::MRMDestMem: {
    unsigned e = (isTwoAddr ? X86::AddrNumOperands+1 : X86::AddrNumOperands);
    i = isTwoAddr ? 1 : 0;
    if (NumOps > e && MI.getOperand(e).isReg() &&
        X86II::isX86_64ExtendedReg(MI.getOperand(e).getReg()))
      REX |= 1 << 2; // set REX.R
    unsigned Bit = 0;
    for (; i != e; ++i) {
      const MCOperand &MO = MI.getOperand(i);
      if (MO.isReg()) {
        if (X86II::isX86_64ExtendedReg(MO.getReg()))
          REX |= 1 << Bit; // REX.B (Bit=0) and REX.X (Bit=1)
        Bit++;
      }
    }
    break;
  }
  default:
    if (MI.getOperand(0).isReg() &&
        X86II::isX86_64ExtendedReg(MI.getOperand(0).getReg()))
      REX |= 1 << 0; // set REX.B
    i = isTwoAddr ? 2 : 1;
    for (unsigned e = NumOps; i != e; ++i) {
      const MCOperand &MO = MI.getOperand(i);
      if (MO.isReg() && X86II::isX86_64ExtendedReg(MO.getReg()))
        REX |= 1 << 2; // set REX.R
    }
    break;
  }
  return REX;
}

/// EmitSegmentOverridePrefix - Emit segment override opcode prefix as needed
void X86MCCodeEmitter::EmitSegmentOverridePrefix(uint64_t TSFlags,
                                        unsigned &CurByte, int MemOperand,
                                        const MCInst &MI,
                                        raw_ostream &OS) const {
  switch (TSFlags & X86II::SegOvrMask) {
  default: llvm_unreachable("Invalid segment!");
  case 0:
    // No segment override, check for explicit one on memory operand.
    if (MemOperand != -1) {   // If the instruction has a memory operand.
      switch (MI.getOperand(MemOperand+X86::AddrSegmentReg).getReg()) {
      default: llvm_unreachable("Unknown segment register!");
      case 0: break;
      case X86::CS: EmitByte(0x2E, CurByte, OS); break;
      case X86::SS: EmitByte(0x36, CurByte, OS); break;
      case X86::DS: EmitByte(0x3E, CurByte, OS); break;
      case X86::ES: EmitByte(0x26, CurByte, OS); break;
      case X86::FS: EmitByte(0x64, CurByte, OS); break;
      case X86::GS: EmitByte(0x65, CurByte, OS); break;
      }
    }
    break;
  case X86II::FS:
    EmitByte(0x64, CurByte, OS);
    break;
  case X86II::GS:
    EmitByte(0x65, CurByte, OS);
    break;
  }
}

/// EmitOpcodePrefix - Emit all instruction prefixes prior to the opcode.
///
/// MemOperand is the operand # of the start of a memory operand if present.  If
/// Not present, it is -1.
void X86MCCodeEmitter::EmitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
                                        int MemOperand, const MCInst &MI,
                                        const MCInstrDesc &Desc,
                                        raw_ostream &OS) const {

  // Emit the lock opcode prefix as needed.
  if (TSFlags & X86II::LOCK)
    EmitByte(0xF0, CurByte, OS);

  // Emit segment override opcode prefix as needed.
  EmitSegmentOverridePrefix(TSFlags, CurByte, MemOperand, MI, OS);

  // Emit the repeat opcode prefix as needed.
  if ((TSFlags & X86II::Op0Mask) == X86II::REP)
    EmitByte(0xF3, CurByte, OS);

  // Emit the address size opcode prefix as needed.
  if ((TSFlags & X86II::AdSize) ||
      (MemOperand != -1 && is64BitMode() && Is32BitMemOperand(MI, MemOperand)))
    EmitByte(0x67, CurByte, OS);

  // Emit the operand size opcode prefix as needed.
  if (TSFlags & X86II::OpSize)
    EmitByte(0x66, CurByte, OS);

  // Emit any mandatory F2/F3/x87 prefix byte and remember whether a 0F
  // escape must follow (just before the opcode, after any REX).
  bool Need0FPrefix = false;
  switch (TSFlags & X86II::Op0Mask) {
  default: llvm_unreachable("Invalid prefix!");
  case 0: break;  // No prefix!
  case X86II::REP: break; // already handled.
  case X86II::TB:  // Two-byte opcode prefix
  case X86II::T8:  // 0F 38
  case X86II::TA:  // 0F 3A
  case X86II::A6:  // 0F A6
  case X86II::A7:  // 0F A7
    Need0FPrefix = true;
    break;
  case X86II::T8XS: // F3 0F 38
    EmitByte(0xF3, CurByte, OS);
    Need0FPrefix = true;
    break;
  case X86II::T8XD: // F2 0F 38
    EmitByte(0xF2, CurByte, OS);
    Need0FPrefix = true;
    break;
  case X86II::TAXD: // F2 0F 3A
    EmitByte(0xF2, CurByte, OS);
    Need0FPrefix = true;
    break;
  case X86II::XS:   // F3 0F
    EmitByte(0xF3, CurByte, OS);
    Need0FPrefix = true;
    break;
  case X86II::XD:   // F2 0F
    EmitByte(0xF2, CurByte, OS);
    Need0FPrefix = true;
    break;
  case X86II::D8: EmitByte(0xD8, CurByte, OS); break;
  case X86II::D9: EmitByte(0xD9, CurByte, OS); break;
  case X86II::DA: EmitByte(0xDA, CurByte, OS); break;
  case X86II::DB: EmitByte(0xDB, CurByte, OS); break;
  case X86II::DC: EmitByte(0xDC, CurByte, OS); break;
  case X86II::DD: EmitByte(0xDD, CurByte, OS); break;
  case X86II::DE: EmitByte(0xDE, CurByte, OS); break;
  case X86II::DF: EmitByte(0xDF, CurByte, OS); break;
  }

  // Handle REX prefix.
  // FIXME: Can this come before F2 etc to simplify emission?
  if (is64BitMode()) {
    if (unsigned REX = DetermineREXPrefix(MI, TSFlags, Desc))
      EmitByte(0x40 | REX, CurByte, OS);
  }

  // 0x0F escape code must be emitted just before the opcode.
  if (Need0FPrefix)
    EmitByte(0x0F, CurByte, OS);

  // FIXME: Pull this up into previous switch if REX can be moved earlier.
  switch (TSFlags & X86II::Op0Mask) {
  case X86II::T8XS:  // F3 0F 38
  case X86II::T8XD:  // F2 0F 38
  case X86II::T8:    // 0F 38
    EmitByte(0x38, CurByte, OS);
    break;
  case X86II::TAXD:  // F2 0F 3A
  case X86II::TA:    // 0F 3A
    EmitByte(0x3A, CurByte, OS);
    break;
  case X86II::A6:    // 0F A6
    EmitByte(0xA6, CurByte, OS);
    break;
  case X86II::A7:    // 0F A7
    EmitByte(0xA7, CurByte, OS);
    break;
  }
}

void X86MCCodeEmitter::
EncodeInstruction(const MCInst &MI, raw_ostream &OS,
                  SmallVectorImpl<MCFixup> &Fixups) const {
  unsigned Opcode = MI.getOpcode();
  const MCInstrDesc &Desc = MCII.get(Opcode);
  uint64_t TSFlags = Desc.TSFlags;

  // Pseudo instructions don't get encoded.
  if ((TSFlags & X86II::FormMask) == X86II::Pseudo)
    return;

  // If this is a two-address instruction, skip one of the register operands.
  // FIXME: This should be handled during MCInst lowering.
  unsigned NumOps = Desc.getNumOperands();
  unsigned CurOp = 0;
  if (NumOps > 1 && Desc.getOperandConstraint(1, MCOI::TIED_TO) != -1)
    ++CurOp;
  else if (NumOps > 2 && Desc.getOperandConstraint(NumOps-1, MCOI::TIED_TO)== 0)
    // Skip the last source operand that is tied_to the dest reg. e.g. LXADD32
    --NumOps;

  // Keep track of the current byte being emitted.
  unsigned CurByte = 0;

  // Is this instruction encoded using the AVX VEX prefix?
  bool HasVEXPrefix = (TSFlags >> X86II::VEXShift) & X86II::VEX;

  // It uses the VEX.VVVV field?
929 bool HasVEX_4V = (TSFlags >> X86II::VEXShift) & X86II::VEX_4V; 930 bool HasVEX_4VOp3 = (TSFlags >> X86II::VEXShift) & X86II::VEX_4VOp3; 931 bool HasMemOp4 = (TSFlags >> X86II::VEXShift) & X86II::MemOp4; 932 const unsigned MemOp4_I8IMMOperand = 2; 933 934 // Determine where the memory operand starts, if present. 935 int MemoryOperand = X86II::getMemoryOperandNo(TSFlags, Opcode); 936 if (MemoryOperand != -1) MemoryOperand += CurOp; 937 938 if (!HasVEXPrefix) 939 EmitOpcodePrefix(TSFlags, CurByte, MemoryOperand, MI, Desc, OS); 940 else 941 EmitVEXOpcodePrefix(TSFlags, CurByte, MemoryOperand, MI, Desc, OS); 942 943 unsigned char BaseOpcode = X86II::getBaseOpcodeFor(TSFlags); 944 945 if ((TSFlags >> X86II::VEXShift) & X86II::Has3DNow0F0FOpcode) 946 BaseOpcode = 0x0F; // Weird 3DNow! encoding. 947 948 unsigned SrcRegNum = 0; 949 switch (TSFlags & X86II::FormMask) { 950 case X86II::MRMInitReg: 951 llvm_unreachable("FIXME: Remove this form when the JIT moves to MCCodeEmitter!"); 952 default: errs() << "FORM: " << (TSFlags & X86II::FormMask) << "\n"; 953 llvm_unreachable("Unknown FormMask value in X86MCCodeEmitter!"); 954 case X86II::Pseudo: 955 llvm_unreachable("Pseudo instruction shouldn't be emitted"); 956 case X86II::RawFrm: 957 EmitByte(BaseOpcode, CurByte, OS); 958 break; 959 case X86II::RawFrmImm8: 960 EmitByte(BaseOpcode, CurByte, OS); 961 EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(), 962 X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags), 963 CurByte, OS, Fixups); 964 EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(), 1, FK_Data_1, CurByte, 965 OS, Fixups); 966 break; 967 case X86II::RawFrmImm16: 968 EmitByte(BaseOpcode, CurByte, OS); 969 EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(), 970 X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags), 971 CurByte, OS, Fixups); 972 EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(), 2, FK_Data_2, CurByte, 973 OS, Fixups); 974 break; 975 976 case X86II::AddRegFrm: 977 EmitByte(BaseOpcode + 
GetX86RegNum(MI.getOperand(CurOp++)), CurByte, OS); 978 break; 979 980 case X86II::MRMDestReg: 981 EmitByte(BaseOpcode, CurByte, OS); 982 EmitRegModRMByte(MI.getOperand(CurOp), 983 GetX86RegNum(MI.getOperand(CurOp+1)), CurByte, OS); 984 CurOp += 2; 985 break; 986 987 case X86II::MRMDestMem: 988 EmitByte(BaseOpcode, CurByte, OS); 989 SrcRegNum = CurOp + X86::AddrNumOperands; 990 991 if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV) 992 SrcRegNum++; 993 994 EmitMemModRMByte(MI, CurOp, 995 GetX86RegNum(MI.getOperand(SrcRegNum)), 996 TSFlags, CurByte, OS, Fixups); 997 CurOp = SrcRegNum + 1; 998 break; 999 1000 case X86II::MRMSrcReg: 1001 EmitByte(BaseOpcode, CurByte, OS); 1002 SrcRegNum = CurOp + 1; 1003 1004 if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV) 1005 SrcRegNum++; 1006 1007 if(HasMemOp4) // Skip 2nd src (which is encoded in I8IMM) 1008 SrcRegNum++; 1009 1010 EmitRegModRMByte(MI.getOperand(SrcRegNum), 1011 GetX86RegNum(MI.getOperand(CurOp)), CurByte, OS); 1012 1013 // 2 operands skipped with HasMemOp4, comensate accordingly 1014 CurOp = HasMemOp4 ? SrcRegNum : SrcRegNum + 1; 1015 if (HasVEX_4VOp3) 1016 ++CurOp; 1017 break; 1018 1019 case X86II::MRMSrcMem: { 1020 int AddrOperands = X86::AddrNumOperands; 1021 unsigned FirstMemOp = CurOp+1; 1022 if (HasVEX_4V) { 1023 ++AddrOperands; 1024 ++FirstMemOp; // Skip the register source (which is encoded in VEX_VVVV). 
1025 } 1026 if(HasMemOp4) // Skip second register source (encoded in I8IMM) 1027 ++FirstMemOp; 1028 1029 EmitByte(BaseOpcode, CurByte, OS); 1030 1031 EmitMemModRMByte(MI, FirstMemOp, GetX86RegNum(MI.getOperand(CurOp)), 1032 TSFlags, CurByte, OS, Fixups); 1033 CurOp += AddrOperands + 1; 1034 if (HasVEX_4VOp3) 1035 ++CurOp; 1036 break; 1037 } 1038 1039 case X86II::MRM0r: case X86II::MRM1r: 1040 case X86II::MRM2r: case X86II::MRM3r: 1041 case X86II::MRM4r: case X86II::MRM5r: 1042 case X86II::MRM6r: case X86II::MRM7r: 1043 if (HasVEX_4V) // Skip the register dst (which is encoded in VEX_VVVV). 1044 CurOp++; 1045 EmitByte(BaseOpcode, CurByte, OS); 1046 EmitRegModRMByte(MI.getOperand(CurOp++), 1047 (TSFlags & X86II::FormMask)-X86II::MRM0r, 1048 CurByte, OS); 1049 break; 1050 case X86II::MRM0m: case X86II::MRM1m: 1051 case X86II::MRM2m: case X86II::MRM3m: 1052 case X86II::MRM4m: case X86II::MRM5m: 1053 case X86II::MRM6m: case X86II::MRM7m: 1054 if (HasVEX_4V) // Skip the register dst (which is encoded in VEX_VVVV). 
1055 CurOp++; 1056 EmitByte(BaseOpcode, CurByte, OS); 1057 EmitMemModRMByte(MI, CurOp, (TSFlags & X86II::FormMask)-X86II::MRM0m, 1058 TSFlags, CurByte, OS, Fixups); 1059 CurOp += X86::AddrNumOperands; 1060 break; 1061 case X86II::MRM_C1: case X86II::MRM_C2: 1062 case X86II::MRM_C3: case X86II::MRM_C4: 1063 case X86II::MRM_C8: case X86II::MRM_C9: 1064 case X86II::MRM_D0: case X86II::MRM_D1: 1065 case X86II::MRM_D8: case X86II::MRM_D9: 1066 case X86II::MRM_DA: case X86II::MRM_DB: 1067 case X86II::MRM_DC: case X86II::MRM_DD: 1068 case X86II::MRM_DE: case X86II::MRM_DF: 1069 case X86II::MRM_E8: case X86II::MRM_F0: 1070 case X86II::MRM_F8: case X86II::MRM_F9: 1071 EmitByte(BaseOpcode, CurByte, OS); 1072 1073 unsigned char MRM; 1074 switch (TSFlags & X86II::FormMask) { 1075 default: llvm_unreachable("Invalid Form"); 1076 case X86II::MRM_C1: MRM = 0xC1; break; 1077 case X86II::MRM_C2: MRM = 0xC2; break; 1078 case X86II::MRM_C3: MRM = 0xC3; break; 1079 case X86II::MRM_C4: MRM = 0xC4; break; 1080 case X86II::MRM_C8: MRM = 0xC8; break; 1081 case X86II::MRM_C9: MRM = 0xC9; break; 1082 case X86II::MRM_D0: MRM = 0xD0; break; 1083 case X86II::MRM_D1: MRM = 0xD1; break; 1084 case X86II::MRM_D8: MRM = 0xD8; break; 1085 case X86II::MRM_D9: MRM = 0xD9; break; 1086 case X86II::MRM_DA: MRM = 0xDA; break; 1087 case X86II::MRM_DB: MRM = 0xDB; break; 1088 case X86II::MRM_DC: MRM = 0xDC; break; 1089 case X86II::MRM_DD: MRM = 0xDD; break; 1090 case X86II::MRM_DE: MRM = 0xDE; break; 1091 case X86II::MRM_DF: MRM = 0xDF; break; 1092 case X86II::MRM_E8: MRM = 0xE8; break; 1093 case X86II::MRM_F0: MRM = 0xF0; break; 1094 case X86II::MRM_F8: MRM = 0xF8; break; 1095 case X86II::MRM_F9: MRM = 0xF9; break; 1096 } 1097 EmitByte(MRM, CurByte, OS); 1098 break; 1099 } 1100 1101 // If there is a remaining operand, it must be a trailing immediate. Emit it 1102 // according to the right size for the instruction. 
1103 if (CurOp != NumOps) { 1104 // The last source register of a 4 operand instruction in AVX is encoded 1105 // in bits[7:4] of a immediate byte. 1106 if ((TSFlags >> X86II::VEXShift) & X86II::VEX_I8IMM) { 1107 const MCOperand &MO = MI.getOperand(HasMemOp4 ? MemOp4_I8IMMOperand 1108 : CurOp); 1109 CurOp++; 1110 bool IsExtReg = X86II::isX86_64ExtendedReg(MO.getReg()); 1111 unsigned RegNum = (IsExtReg ? (1 << 7) : 0); 1112 RegNum |= GetX86RegNum(MO) << 4; 1113 // If there is an additional 5th operand it must be an immediate, which 1114 // is encoded in bits[3:0] 1115 if(CurOp != NumOps) { 1116 const MCOperand &MIMM = MI.getOperand(CurOp++); 1117 if(MIMM.isImm()) { 1118 unsigned Val = MIMM.getImm(); 1119 assert(Val < 16 && "Immediate operand value out of range"); 1120 RegNum |= Val; 1121 } 1122 } 1123 EmitImmediate(MCOperand::CreateImm(RegNum), MI.getLoc(), 1, FK_Data_1, 1124 CurByte, OS, Fixups); 1125 } else { 1126 unsigned FixupKind; 1127 // FIXME: Is there a better way to know that we need a signed relocation? 1128 if (MI.getOpcode() == X86::ADD64ri32 || 1129 MI.getOpcode() == X86::MOV64ri32 || 1130 MI.getOpcode() == X86::MOV64mi32 || 1131 MI.getOpcode() == X86::PUSH64i32) 1132 FixupKind = X86::reloc_signed_4byte; 1133 else 1134 FixupKind = getImmFixupKind(TSFlags); 1135 EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(), 1136 X86II::getSizeOfImm(TSFlags), MCFixupKind(FixupKind), 1137 CurByte, OS, Fixups); 1138 } 1139 } 1140 1141 if ((TSFlags >> X86II::VEXShift) & X86II::Has3DNow0F0FOpcode) 1142 EmitByte(X86II::getBaseOpcodeFor(TSFlags), CurByte, OS); 1143 1144#ifndef NDEBUG 1145 // FIXME: Verify. 1146 if (/*!Desc.isVariadic() &&*/ CurOp != NumOps) { 1147 errs() << "Cannot encode all operands of: "; 1148 MI.dump(); 1149 errs() << '\n'; 1150 abort(); 1151 } 1152#endif 1153} 1154