X86MCCodeEmitter.cpp revision 17730847d59c919d97f097d46a3fcba1888e5300
1//===-- X86/X86MCCodeEmitter.cpp - Convert X86 code to machine code -------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file implements the X86MCCodeEmitter class.
11//
12//===----------------------------------------------------------------------===//
13
14#define DEBUG_TYPE "mccodeemitter"
15#include "MCTargetDesc/X86MCTargetDesc.h"
16#include "MCTargetDesc/X86BaseInfo.h"
17#include "MCTargetDesc/X86FixupKinds.h"
18#include "llvm/MC/MCCodeEmitter.h"
19#include "llvm/MC/MCExpr.h"
20#include "llvm/MC/MCInst.h"
21#include "llvm/MC/MCInstrInfo.h"
22#include "llvm/MC/MCRegisterInfo.h"
23#include "llvm/MC/MCSubtargetInfo.h"
24#include "llvm/MC/MCSymbol.h"
25#include "llvm/Support/raw_ostream.h"
26
27using namespace llvm;
28
29namespace {
30class X86MCCodeEmitter : public MCCodeEmitter {
31  X86MCCodeEmitter(const X86MCCodeEmitter &); // DO NOT IMPLEMENT
32  void operator=(const X86MCCodeEmitter &); // DO NOT IMPLEMENT
33  const MCInstrInfo &MCII;
34  const MCSubtargetInfo &STI;
35  MCContext &Ctx;
36public:
37  X86MCCodeEmitter(const MCInstrInfo &mcii, const MCSubtargetInfo &sti,
38                   MCContext &ctx)
39    : MCII(mcii), STI(sti), Ctx(ctx) {
40  }
41
42  ~X86MCCodeEmitter() {}
43
44  bool is64BitMode() const {
45    // FIXME: Can tablegen auto-generate this?
46    return (STI.getFeatureBits() & X86::Mode64Bit) != 0;
47  }
48
49  static unsigned GetX86RegNum(const MCOperand &MO) {
50    return X86_MC::getX86RegNum(MO.getReg());
51  }
52
53  // On regular x86, both XMM0-XMM7 and XMM8-XMM15 are encoded in the range
54  // 0-7 and the difference between the 2 groups is given by the REX prefix.
55  // In the VEX prefix, registers are seen sequentially from 0-15 and encoded
56  // in 1's complement form, example:
57  //
58  //  ModRM field => XMM9 => 1
59  //  VEX.VVVV    => XMM9 => ~9
60  //
61  // See table 4-35 of Intel AVX Programming Reference for details.
62  static unsigned char getVEXRegisterEncoding(const MCInst &MI,
63                                              unsigned OpNum) {
64    unsigned SrcReg = MI.getOperand(OpNum).getReg();
65    unsigned SrcRegNum = GetX86RegNum(MI.getOperand(OpNum));
66    if (X86II::isX86_64ExtendedReg(SrcReg))
67      SrcRegNum |= 8;
68
69    // The registers represented through VEX_VVVV should
70    // be encoded in 1's complement form.
71    return (~SrcRegNum) & 0xf;
72  }
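  // Worked example: XMM9 has register number 1, ORing in 8 for the extended
  // bank gives 9, and the returned VEX.VVVV nibble is (~9) & 0xf == 0x6.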
73
74  void EmitByte(unsigned char C, unsigned &CurByte, raw_ostream &OS) const {
75    OS << (char)C;
76    ++CurByte;
77  }
78
79  void EmitConstant(uint64_t Val, unsigned Size, unsigned &CurByte,
80                    raw_ostream &OS) const {
81    // Output the constant in little endian byte order.
82    for (unsigned i = 0; i != Size; ++i) {
83      EmitByte(Val & 255, CurByte, OS);
84      Val >>= 8;
85    }
86  }
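  // For example, EmitConstant(0x12345678, 4, ...) writes the bytes 0x78 0x56
  // 0x34 0x12, in that order.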
87
88  void EmitImmediate(const MCOperand &Disp,
89                     unsigned ImmSize, MCFixupKind FixupKind,
90                     unsigned &CurByte, raw_ostream &OS,
91                     SmallVectorImpl<MCFixup> &Fixups,
92                     int ImmOffset = 0) const;
93
94  inline static unsigned char ModRMByte(unsigned Mod, unsigned RegOpcode,
95                                        unsigned RM) {
96    assert(Mod < 4 && RegOpcode < 8 && RM < 8 && "ModRM Fields out of range!");
97    return RM | (RegOpcode << 3) | (Mod << 6);
98  }
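  // For example, ModRMByte(3, 2, 1) == 0xD1: mod=0b11 (register-direct),
  // reg/opcode=0b010, r/m=0b001.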
99
100  void EmitRegModRMByte(const MCOperand &ModRMReg, unsigned RegOpcodeFld,
101                        unsigned &CurByte, raw_ostream &OS) const {
102    EmitByte(ModRMByte(3, RegOpcodeFld, GetX86RegNum(ModRMReg)), CurByte, OS);
103  }
104
105  void EmitSIBByte(unsigned SS, unsigned Index, unsigned Base,
106                   unsigned &CurByte, raw_ostream &OS) const {
107    // SIB byte is in the same format as the ModRMByte.
108    EmitByte(ModRMByte(SS, Index, Base), CurByte, OS);
109  }
110
111
112  void EmitMemModRMByte(const MCInst &MI, unsigned Op,
113                        unsigned RegOpcodeField,
114                        uint64_t TSFlags, unsigned &CurByte, raw_ostream &OS,
115                        SmallVectorImpl<MCFixup> &Fixups) const;
116
117  void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
118                         SmallVectorImpl<MCFixup> &Fixups) const;
119
120  void EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, int MemOperand,
121                           const MCInst &MI, const MCInstrDesc &Desc,
122                           raw_ostream &OS) const;
123
124  void EmitSegmentOverridePrefix(uint64_t TSFlags, unsigned &CurByte,
125                                 int MemOperand, const MCInst &MI,
126                                 raw_ostream &OS) const;
127
128  void EmitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, int MemOperand,
129                        const MCInst &MI, const MCInstrDesc &Desc,
130                        raw_ostream &OS) const;
131};
132
133} // end anonymous namespace
134
135
136MCCodeEmitter *llvm::createX86MCCodeEmitter(const MCInstrInfo &MCII,
137                                            const MCSubtargetInfo &STI,
138                                            MCContext &Ctx) {
139  return new X86MCCodeEmitter(MCII, STI, Ctx);
140}
141
142/// isDisp8 - Return true if this signed displacement fits in an 8-bit
143/// sign-extended field.
144static bool isDisp8(int Value) {
145  return Value == (signed char)Value;
146}
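// For example, isDisp8(127) and isDisp8(-128) return true, while isDisp8(128)
// and isDisp8(-129) return false.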
147
148/// getImmFixupKind - Return the appropriate fixup kind to use for an immediate
149/// in an instruction with the specified TSFlags.
150static MCFixupKind getImmFixupKind(uint64_t TSFlags) {
151  unsigned Size = X86II::getSizeOfImm(TSFlags);
152  bool isPCRel = X86II::isImmPCRel(TSFlags);
153
154  return MCFixup::getKindForSize(Size, isPCRel);
155}
156
157/// Is32BitMemOperand - Return true if the specified instruction with a memory
158/// operand should emit the 0x67 prefix byte in 64-bit mode due to a 32-bit
159/// memory operand.  Op specifies the operand # of the memory operand.
160static bool Is32BitMemOperand(const MCInst &MI, unsigned Op) {
161  const MCOperand &BaseReg  = MI.getOperand(Op+X86::AddrBaseReg);
162  const MCOperand &IndexReg = MI.getOperand(Op+X86::AddrIndexReg);
163
164  if ((BaseReg.getReg() != 0 &&
165       X86MCRegisterClasses[X86::GR32RegClassID].contains(BaseReg.getReg())) ||
166      (IndexReg.getReg() != 0 &&
167       X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg.getReg())))
168    return true;
169  return false;
170}
171
172/// StartsWithGlobalOffsetTable - Return true for the simple cases where this
173/// expression starts with _GLOBAL_OFFSET_TABLE_. This is needed to support
174/// PIC on ELF i386, as that symbol is magic. We only check the simple cases
175/// that are known to be used: _GLOBAL_OFFSET_TABLE_ by itself or at the start
176/// of a binary expression.
177static bool StartsWithGlobalOffsetTable(const MCExpr *Expr) {
178  if (Expr->getKind() == MCExpr::Binary) {
179    const MCBinaryExpr *BE = static_cast<const MCBinaryExpr *>(Expr);
180    Expr = BE->getLHS();
181  }
182
183  if (Expr->getKind() != MCExpr::SymbolRef)
184    return false;
185
186  const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr*>(Expr);
187  const MCSymbol &S = Ref->getSymbol();
188  return S.getName() == "_GLOBAL_OFFSET_TABLE_";
189}
190
191void X86MCCodeEmitter::
192EmitImmediate(const MCOperand &DispOp, unsigned Size, MCFixupKind FixupKind,
193              unsigned &CurByte, raw_ostream &OS,
194              SmallVectorImpl<MCFixup> &Fixups, int ImmOffset) const {
195  const MCExpr *Expr = NULL;
196  if (DispOp.isImm()) {
197    // If this is a simple integer displacement that doesn't require a
198    // relocation, emit it now.
199    if (FixupKind != FK_PCRel_1 &&
200        FixupKind != FK_PCRel_2 &&
201        FixupKind != FK_PCRel_4) {
202      EmitConstant(DispOp.getImm()+ImmOffset, Size, CurByte, OS);
203      return;
204    }
205    Expr = MCConstantExpr::Create(DispOp.getImm(), Ctx);
206  } else {
207    Expr = DispOp.getExpr();
208  }
209
210  // Expressions that start with _GLOBAL_OFFSET_TABLE_ need a special fixup kind.
211  if ((FixupKind == FK_Data_4 ||
212       FixupKind == MCFixupKind(X86::reloc_signed_4byte)) &&
213      StartsWithGlobalOffsetTable(Expr)) {
214    assert(ImmOffset == 0);
215
216    FixupKind = MCFixupKind(X86::reloc_global_offset_table);
217    ImmOffset = CurByte;
218  }
219
220  // If the fixup is pc-relative, we need to bias the value to be relative to
221  // the start of the field, not the end of the field.
222  if (FixupKind == FK_PCRel_4 ||
223      FixupKind == MCFixupKind(X86::reloc_riprel_4byte) ||
224      FixupKind == MCFixupKind(X86::reloc_riprel_4byte_movq_load))
225    ImmOffset -= 4;
226  if (FixupKind == FK_PCRel_2)
227    ImmOffset -= 2;
228  if (FixupKind == FK_PCRel_1)
229    ImmOffset -= 1;
230
231  if (ImmOffset)
232    Expr = MCBinaryExpr::CreateAdd(Expr, MCConstantExpr::Create(ImmOffset, Ctx),
233                                   Ctx);
234
235  // Emit a symbolic constant as a fixup and 'Size' zero bytes.
236  Fixups.push_back(MCFixup::Create(CurByte, Expr, FixupKind));
237  EmitConstant(0, Size, CurByte, OS);
238}
239
240void X86MCCodeEmitter::EmitMemModRMByte(const MCInst &MI, unsigned Op,
241                                        unsigned RegOpcodeField,
242                                        uint64_t TSFlags, unsigned &CurByte,
243                                        raw_ostream &OS,
244                                        SmallVectorImpl<MCFixup> &Fixups) const{
245  const MCOperand &Disp     = MI.getOperand(Op+X86::AddrDisp);
246  const MCOperand &Base     = MI.getOperand(Op+X86::AddrBaseReg);
247  const MCOperand &Scale    = MI.getOperand(Op+X86::AddrScaleAmt);
248  const MCOperand &IndexReg = MI.getOperand(Op+X86::AddrIndexReg);
249  unsigned BaseReg = Base.getReg();
250
251  // Handle %rip relative addressing.
252  if (BaseReg == X86::RIP) {    // [disp32+RIP] in X86-64 mode
253    assert(is64BitMode() && "Rip-relative addressing requires 64-bit mode");
254    assert(IndexReg.getReg() == 0 && "Invalid rip-relative address");
255    EmitByte(ModRMByte(0, RegOpcodeField, 5), CurByte, OS);
256
257    unsigned FixupKind = X86::reloc_riprel_4byte;
258
259    // movq loads are handled with a special relocation form which allows the
260    // linker to eliminate some loads for GOT references which end up in the
261    // same linkage unit.
262    if (MI.getOpcode() == X86::MOV64rm)
263      FixupKind = X86::reloc_riprel_4byte_movq_load;
264
265    // rip-relative addressing is actually relative to the *next* instruction.
266    // Since an immediate can follow the mod/rm byte for an instruction, this
267    // means that we need to bias the immediate field of the instruction with
268    // the size of the immediate field.  If we have this case, add it into the
269    // expression to emit.
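    // For example, in "cmpl $imm32, sym(%rip)" a 4-byte immediate follows the
    // 4-byte displacement, so the fixup gets an extra -4 bias here on top of
    // the usual -4 applied in EmitImmediate.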
270    int ImmSize = X86II::hasImm(TSFlags) ? X86II::getSizeOfImm(TSFlags) : 0;
271
272    EmitImmediate(Disp, 4, MCFixupKind(FixupKind),
273                  CurByte, OS, Fixups, -ImmSize);
274    return;
275  }
276
277  unsigned BaseRegNo = BaseReg ? GetX86RegNum(Base) : -1U;
278
279  // Determine whether a SIB byte is needed.
280  // If no BaseReg, issue a RIP relative instruction only if the MCE can
281  // resolve addresses on-the-fly, otherwise use SIB (Intel Manual 2A, table
282  // 2-7) and absolute references.
283
284  if (// The SIB byte must be used if there is an index register.
285      IndexReg.getReg() == 0 &&
286      // The SIB byte must be used if the base is ESP/RSP/R12, all of which
287      // encode to an R/M value of 4, which indicates that a SIB byte is
288      // present.
289      BaseRegNo != N86::ESP &&
290      // If there is no base register and we're in 64-bit mode, we need a SIB
291      // byte to emit an addr that is just 'disp32' (the non-RIP relative form).
292      (!is64BitMode() || BaseReg != 0)) {
293
294    if (BaseReg == 0) {          // [disp32]     in X86-32 mode
295      EmitByte(ModRMByte(0, RegOpcodeField, 5), CurByte, OS);
296      EmitImmediate(Disp, 4, FK_Data_4, CurByte, OS, Fixups);
297      return;
298    }
299
300    // If the base is not EBP/ESP and there is no displacement, use simple
301    // indirect register encoding, this handles addresses like [EAX].  The
302    // encoding for [EBP] with no displacement means [disp32] so we handle it
303    // by emitting a displacement of 0 below.
304    if (Disp.isImm() && Disp.getImm() == 0 && BaseRegNo != N86::EBP) {
305      EmitByte(ModRMByte(0, RegOpcodeField, BaseRegNo), CurByte, OS);
306      return;
307    }
308
309    // Otherwise, if the displacement fits in a byte, encode as [REG+disp8].
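    // For example, [EAX+8] with a reg field of 0 is emitted as the ModRM byte
    // 0x40 followed by the displacement byte 0x08.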
310    if (Disp.isImm() && isDisp8(Disp.getImm())) {
311      EmitByte(ModRMByte(1, RegOpcodeField, BaseRegNo), CurByte, OS);
312      EmitImmediate(Disp, 1, FK_Data_1, CurByte, OS, Fixups);
313      return;
314    }
315
316    // Otherwise, emit the most general non-SIB encoding: [REG+disp32]
317    EmitByte(ModRMByte(2, RegOpcodeField, BaseRegNo), CurByte, OS);
318    EmitImmediate(Disp, 4, MCFixupKind(X86::reloc_signed_4byte), CurByte, OS,
319                  Fixups);
320    return;
321  }
322
323  // We need a SIB byte, so start by outputting the ModR/M byte first
324  assert(IndexReg.getReg() != X86::ESP &&
325         IndexReg.getReg() != X86::RSP && "Cannot use ESP as index reg!");
326
327  bool ForceDisp32 = false;
328  bool ForceDisp8  = false;
329  if (BaseReg == 0) {
330    // If there is no base register, we emit the special case SIB byte with
331    // MOD=0, BASE=5, to JUST get the index, scale, and displacement.
332    EmitByte(ModRMByte(0, RegOpcodeField, 4), CurByte, OS);
333    ForceDisp32 = true;
334  } else if (!Disp.isImm()) {
335    // Emit the normal disp32 encoding.
336    EmitByte(ModRMByte(2, RegOpcodeField, 4), CurByte, OS);
337    ForceDisp32 = true;
338  } else if (Disp.getImm() == 0 &&
339             // The base register can't be one that encodes to '5' in the base
340             // field; that is the magic [*] form that indicates no base register.
341             BaseRegNo != N86::EBP) {
342    // Emit no displacement ModR/M byte
343    EmitByte(ModRMByte(0, RegOpcodeField, 4), CurByte, OS);
344  } else if (isDisp8(Disp.getImm())) {
345    // Emit the disp8 encoding.
346    EmitByte(ModRMByte(1, RegOpcodeField, 4), CurByte, OS);
347    ForceDisp8 = true;           // Make sure to force 8 bit disp if Base=EBP
348  } else {
349    // Emit the normal disp32 encoding.
350    EmitByte(ModRMByte(2, RegOpcodeField, 4), CurByte, OS);
351  }
352
353  // Calculate what the SS field value should be...
354  static const unsigned SSTable[] = { ~0U, 0, 1, ~0U, 2, ~0U, ~0U, ~0U, 3 };
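  // Scale 1 -> SS=0, 2 -> 1, 4 -> 2, 8 -> 3; any other scale maps to ~0U (invalid).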
355  unsigned SS = SSTable[Scale.getImm()];
356
357  if (BaseReg == 0) {
358    // Handle the SIB byte for the case where there is no base, see Intel
359    // Manual 2A, table 2-7. The displacement has already been output.
360    unsigned IndexRegNo;
361    if (IndexReg.getReg())
362      IndexRegNo = GetX86RegNum(IndexReg);
363    else // Examples: [ESP+1*<noreg>+4] or [scaled idx]+disp32 (MOD=0,BASE=5)
364      IndexRegNo = 4;
365    EmitSIBByte(SS, IndexRegNo, 5, CurByte, OS);
366  } else {
367    unsigned IndexRegNo;
368    if (IndexReg.getReg())
369      IndexRegNo = GetX86RegNum(IndexReg);
370    else
371      IndexRegNo = 4;   // For example [ESP+1*<noreg>+4]
372    EmitSIBByte(SS, IndexRegNo, GetX86RegNum(Base), CurByte, OS);
373  }
374
375  // Do we need to output a displacement?
376  if (ForceDisp8)
377    EmitImmediate(Disp, 1, FK_Data_1, CurByte, OS, Fixups);
378  else if (ForceDisp32 || Disp.getImm() != 0)
379    EmitImmediate(Disp, 4, MCFixupKind(X86::reloc_signed_4byte), CurByte, OS,
380                  Fixups);
381}
382
383/// EmitVEXOpcodePrefix - AVX instructions are encoded using a opcode prefix
384/// called VEX.
385void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
386                                           int MemOperand, const MCInst &MI,
387                                           const MCInstrDesc &Desc,
388                                           raw_ostream &OS) const {
389  bool HasVEX_4V = false;
390  if ((TSFlags >> X86II::VEXShift) & X86II::VEX_4V)
391    HasVEX_4V = true;
392
393  // VEX_R: opcode extension equivalent to REX.R in
394  // 1's complement (inverted) form
395  //
396  //  1: Same as REX.R=0 (must be 1 in 32-bit mode)
397  //  0: Same as REX.R=1 (64-bit mode only)
398  //
399  unsigned char VEX_R = 0x1;
400
401  // VEX_X: equivalent to REX.X, only used when a
402  // register is used for index in SIB Byte.
403  //
404  //  1: Same as REX.X=0 (must be 1 in 32-bit mode)
405  //  0: Same as REX.X=1 (64-bit mode only)
406  unsigned char VEX_X = 0x1;
407
408  // VEX_B:
409  //
410  //  1: Same as REX.B=0 (ignored in 32-bit mode)
411  //  0: Same as REX.B=1 (64-bit mode only)
412  //
413  unsigned char VEX_B = 0x1;
414
415  // VEX_W: opcode specific (use like REX.W, or used for
416  // opcode extension, or ignored, depending on the opcode byte)
417  unsigned char VEX_W = 0;
418
419  // VEX_5M (VEX m-mmmmm field):
420  //
421  //  0b00000: Reserved for future use
422  //  0b00001: implied 0F leading opcode
423  //  0b00010: implied 0F 38 leading opcode bytes
424  //  0b00011: implied 0F 3A leading opcode bytes
425  //  0b00100-0b11111: Reserved for future use
426  //
427  unsigned char VEX_5M = 0x1;
428
429  // VEX_4V (VEX vvvv field): a register specifier
430  // (in 1's complement form) or 1111 if unused.
431  unsigned char VEX_4V = 0xf;
432
433  // VEX_L (Vector Length):
434  //
435  //  0: scalar or 128-bit vector
436  //  1: 256-bit vector
437  //
438  unsigned char VEX_L = 0;
439
440  // VEX_PP: opcode extension providing equivalent
441  // functionality of a SIMD prefix
442  //
443  //  0b00: None
444  //  0b01: 66
445  //  0b10: F3
446  //  0b11: F2
447  //
448  unsigned char VEX_PP = 0;
449
450  // FIXME: BEXTR uses VEX.vvvv for Operand 3 instead of Operand 2
451  unsigned Opcode = MI.getOpcode();
452  bool IsBEXTR = (Opcode == X86::BEXTR32rr || Opcode == X86::BEXTR32rm ||
453                  Opcode == X86::BEXTR64rr || Opcode == X86::BEXTR64rm);
454
455  // Encode the operand size opcode prefix as needed.
456  if (TSFlags & X86II::OpSize)
457    VEX_PP = 0x01;
458
459  if ((TSFlags >> X86II::VEXShift) & X86II::VEX_W)
460    VEX_W = 1;
461
462  if ((TSFlags >> X86II::VEXShift) & X86II::VEX_L)
463    VEX_L = 1;
464
465  switch (TSFlags & X86II::Op0Mask) {
466  default: assert(0 && "Invalid prefix!");
467  case X86II::T8:  // 0F 38
468    VEX_5M = 0x2;
469    break;
470  case X86II::TA:  // 0F 3A
471    VEX_5M = 0x3;
472    break;
473  case X86II::TF:  // F2 0F 38
474    VEX_PP = 0x3;
475    VEX_5M = 0x2;
476    break;
477  case X86II::XS:  // F3 0F
478    VEX_PP = 0x2;
479    break;
480  case X86II::XD:  // F2 0F
481    VEX_PP = 0x3;
482    break;
483  case X86II::A6:  // Bypass: Not used by VEX
484  case X86II::A7:  // Bypass: Not used by VEX
485  case X86II::TB:  // Bypass: Not used by VEX
486  case 0:
487    break;  // No prefix!
488  }
489
490  // Set the vector length to 256 bits if any of YMM0-YMM15 is used.
491  for (unsigned i = 0; i != MI.getNumOperands(); ++i) {
492    if (!MI.getOperand(i).isReg())
493      continue;
494    unsigned SrcReg = MI.getOperand(i).getReg();
495    if (SrcReg >= X86::YMM0 && SrcReg <= X86::YMM15)
496      VEX_L = 1;
497  }
498
499  // Classify VEX_B, VEX_4V, VEX_R, VEX_X
500  unsigned CurOp = 0;
501  switch (TSFlags & X86II::FormMask) {
502  case X86II::MRMInitReg: assert(0 && "FIXME: Remove this!");
503  case X86II::MRMDestMem: {
504    // MRMDestMem instruction forms:
505    //  MemAddr, src1(ModR/M)
506    //  MemAddr, src1(VEX_4V), src2(ModR/M)
507    //  MemAddr, src1(ModR/M), imm8
508    //
509    if (X86II::isX86_64ExtendedReg(MI.getOperand(X86::AddrBaseReg).getReg()))
510      VEX_B = 0x0;
511    if (X86II::isX86_64ExtendedReg(MI.getOperand(X86::AddrIndexReg).getReg()))
512      VEX_X = 0x0;
513
514    CurOp = X86::AddrNumOperands;
515    if (HasVEX_4V)
516      VEX_4V = getVEXRegisterEncoding(MI, CurOp++);
517
518    const MCOperand &MO = MI.getOperand(CurOp);
519    if (MO.isReg() && X86II::isX86_64ExtendedReg(MO.getReg()))
520      VEX_R = 0x0;
521    break;
522  }
523  case X86II::MRMSrcMem:
524    // MRMSrcMem instruction forms:
525    //  src1(ModR/M), MemAddr
526    //  src1(ModR/M), src2(VEX_4V), MemAddr
527    //  src1(ModR/M), MemAddr, imm8
528    //  src1(ModR/M), MemAddr, src2(VEX_I8IMM)
529    //
530    if (X86II::isX86_64ExtendedReg(MI.getOperand(0).getReg()))
531      VEX_R = 0x0;
532
533    // FIXME: BEXTR uses VEX.vvvv for Operand 3
534    if (HasVEX_4V && !IsBEXTR)
535      VEX_4V = getVEXRegisterEncoding(MI, 1);
536
537    if (X86II::isX86_64ExtendedReg(
538               MI.getOperand(MemOperand+X86::AddrBaseReg).getReg()))
539      VEX_B = 0x0;
540    if (X86II::isX86_64ExtendedReg(
541               MI.getOperand(MemOperand+X86::AddrIndexReg).getReg()))
542      VEX_X = 0x0;
543
544    if (IsBEXTR)
545      VEX_4V = getVEXRegisterEncoding(MI, X86::AddrNumOperands+1);
546    break;
547  case X86II::MRM0m: case X86II::MRM1m:
548  case X86II::MRM2m: case X86II::MRM3m:
549  case X86II::MRM4m: case X86II::MRM5m:
550  case X86II::MRM6m: case X86II::MRM7m: {
551    // MRM0m-MRM7m instruction forms:
552    //  MemAddr
553    //  src1(VEX_4V), MemAddr
554    if (HasVEX_4V)
555      VEX_4V = getVEXRegisterEncoding(MI, 0);
556
557    if (X86II::isX86_64ExtendedReg(
558               MI.getOperand(MemOperand+X86::AddrBaseReg).getReg()))
559      VEX_B = 0x0;
560    if (X86II::isX86_64ExtendedReg(
561               MI.getOperand(MemOperand+X86::AddrIndexReg).getReg()))
562      VEX_X = 0x0;
563    break;
564  }
565  case X86II::MRMSrcReg:
566    // MRMSrcReg instruction forms:
567    //  dst(ModR/M), src1(VEX_4V), src2(ModR/M), src3(VEX_I8IMM)
568    //  dst(ModR/M), src1(ModR/M)
569    //  dst(ModR/M), src1(ModR/M), imm8
570    //
571    if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
572      VEX_R = 0x0;
573    CurOp++;
574
575    // FIXME: BEXTR uses VEX.vvvv for Operand 3
576    if (HasVEX_4V && !IsBEXTR)
577      VEX_4V = getVEXRegisterEncoding(MI, CurOp++);
578    if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
579      VEX_B = 0x0;
580    CurOp++;
581    if (IsBEXTR)
582      VEX_4V = getVEXRegisterEncoding(MI, CurOp);
583    break;
584  case X86II::MRMDestReg:
585    // MRMDestReg instruction forms:
586    //  dst(ModR/M), src(ModR/M)
587    //  dst(ModR/M), src(ModR/M), imm8
588    if (X86II::isX86_64ExtendedReg(MI.getOperand(0).getReg()))
589      VEX_B = 0x0;
590    if (X86II::isX86_64ExtendedReg(MI.getOperand(1).getReg()))
591      VEX_R = 0x0;
592    break;
593  case X86II::MRM0r: case X86II::MRM1r:
594  case X86II::MRM2r: case X86II::MRM3r:
595  case X86II::MRM4r: case X86II::MRM5r:
596  case X86II::MRM6r: case X86II::MRM7r:
597    // MRM0r-MRM7r instruction forms:
598    //  dst(VEX_4V), src(ModR/M), imm8
599    VEX_4V = getVEXRegisterEncoding(MI, 0);
600    if (X86II::isX86_64ExtendedReg(MI.getOperand(1).getReg()))
601      VEX_B = 0x0;
602    break;
603  default: // RawFrm
604    break;
605  }
606
607  // Emit segment override opcode prefix as needed.
608  EmitSegmentOverridePrefix(TSFlags, CurByte, MemOperand, MI, OS);
609
610  // VEX opcode prefix can have 2 or 3 bytes
611  //
612  //  3 bytes:
613  //    +-----+ +--------------+ +-------------------+
614  //    | C4h | | RXB | m-mmmm | | W | vvvv | L | pp |
615  //    +-----+ +--------------+ +-------------------+
616  //  2 bytes:
617  //    +-----+ +-------------------+
618  //    | C5h | | R | vvvv | L | pp |
619  //    +-----+ +-------------------+
620  //
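  // Worked example: with the 66 prefix (VEX_PP=0x1), VEX_L=0 and an unused
  // vvvv field (VEX_4V=0xf), LastByte below is 0x79; with VEX_R=1 this emits
  // the two-byte prefix C5 F9.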
621  unsigned char LastByte = VEX_PP | (VEX_L << 2) | (VEX_4V << 3);
622
623  if (VEX_B && VEX_X && !VEX_W && (VEX_5M == 1)) { // 2 byte VEX prefix
624    EmitByte(0xC5, CurByte, OS);
625    EmitByte(LastByte | (VEX_R << 7), CurByte, OS);
626    return;
627  }
628
629  // 3 byte VEX prefix
630  EmitByte(0xC4, CurByte, OS);
631  EmitByte(VEX_R << 7 | VEX_X << 6 | VEX_B << 5 | VEX_5M, CurByte, OS);
632  EmitByte(LastByte | (VEX_W << 7), CurByte, OS);
633}
634
635/// DetermineREXPrefix - Determine if the MCInst has to be encoded with an X86-64
636/// REX prefix which specifies 1) 64-bit instructions, 2) non-default operand
637/// size, and 3) use of X86-64 extended registers.
638static unsigned DetermineREXPrefix(const MCInst &MI, uint64_t TSFlags,
639                                   const MCInstrDesc &Desc) {
640  unsigned REX = 0;
641  if (TSFlags & X86II::REX_W)
642    REX |= 1 << 3; // set REX.W
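  // The low four bits collected here are W (bit 3), R (bit 2), X (bit 1) and
  // B (bit 0); the caller emits the prefix as 0x40 | REX, so REX.W alone
  // yields the familiar 0x48 byte.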
643
644  if (MI.getNumOperands() == 0) return REX;
645
646  unsigned NumOps = MI.getNumOperands();
647  // FIXME: MCInst should explicitize the two-addrness.
648  bool isTwoAddr = NumOps > 1 &&
649                      Desc.getOperandConstraint(1, MCOI::TIED_TO) != -1;
650
651  // If it accesses SPL, BPL, SIL, or DIL, then it requires a 0x40 REX prefix.
652  unsigned i = isTwoAddr ? 1 : 0;
653  for (; i != NumOps; ++i) {
654    const MCOperand &MO = MI.getOperand(i);
655    if (!MO.isReg()) continue;
656    unsigned Reg = MO.getReg();
657    if (!X86II::isX86_64NonExtLowByteReg(Reg)) continue;
658    // FIXME: The caller of DetermineREXPrefix slaps this prefix onto anything
659    // that returns non-zero.
660    REX |= 0x40; // REX fixed encoding prefix
661    break;
662  }
663
664  switch (TSFlags & X86II::FormMask) {
665  case X86II::MRMInitReg: assert(0 && "FIXME: Remove this!");
666  case X86II::MRMSrcReg:
667    if (MI.getOperand(0).isReg() &&
668        X86II::isX86_64ExtendedReg(MI.getOperand(0).getReg()))
669      REX |= 1 << 2; // set REX.R
670    i = isTwoAddr ? 2 : 1;
671    for (; i != NumOps; ++i) {
672      const MCOperand &MO = MI.getOperand(i);
673      if (MO.isReg() && X86II::isX86_64ExtendedReg(MO.getReg()))
674        REX |= 1 << 0; // set REX.B
675    }
676    break;
677  case X86II::MRMSrcMem: {
678    if (MI.getOperand(0).isReg() &&
679        X86II::isX86_64ExtendedReg(MI.getOperand(0).getReg()))
680      REX |= 1 << 2; // set REX.R
681    unsigned Bit = 0;
682    i = isTwoAddr ? 2 : 1;
683    for (; i != NumOps; ++i) {
684      const MCOperand &MO = MI.getOperand(i);
685      if (MO.isReg()) {
686        if (X86II::isX86_64ExtendedReg(MO.getReg()))
687          REX |= 1 << Bit; // set REX.B (Bit=0) and REX.X (Bit=1)
688        Bit++;
689      }
690    }
691    break;
692  }
693  case X86II::MRM0m: case X86II::MRM1m:
694  case X86II::MRM2m: case X86II::MRM3m:
695  case X86II::MRM4m: case X86II::MRM5m:
696  case X86II::MRM6m: case X86II::MRM7m:
697  case X86II::MRMDestMem: {
698    unsigned e = (isTwoAddr ? X86::AddrNumOperands+1 : X86::AddrNumOperands);
699    i = isTwoAddr ? 1 : 0;
700    if (NumOps > e && MI.getOperand(e).isReg() &&
701        X86II::isX86_64ExtendedReg(MI.getOperand(e).getReg()))
702      REX |= 1 << 2; // set REX.R
703    unsigned Bit = 0;
704    for (; i != e; ++i) {
705      const MCOperand &MO = MI.getOperand(i);
706      if (MO.isReg()) {
707        if (X86II::isX86_64ExtendedReg(MO.getReg()))
708          REX |= 1 << Bit; // REX.B (Bit=0) and REX.X (Bit=1)
709        Bit++;
710      }
711    }
712    break;
713  }
714  default:
715    if (MI.getOperand(0).isReg() &&
716        X86II::isX86_64ExtendedReg(MI.getOperand(0).getReg()))
717      REX |= 1 << 0; // set REX.B
718    i = isTwoAddr ? 2 : 1;
719    for (unsigned e = NumOps; i != e; ++i) {
720      const MCOperand &MO = MI.getOperand(i);
721      if (MO.isReg() && X86II::isX86_64ExtendedReg(MO.getReg()))
722        REX |= 1 << 2; // set REX.R
723    }
724    break;
725  }
726  return REX;
727}
728
729/// EmitSegmentOverridePrefix - Emit segment override opcode prefix as needed
730void X86MCCodeEmitter::EmitSegmentOverridePrefix(uint64_t TSFlags,
731                                        unsigned &CurByte, int MemOperand,
732                                        const MCInst &MI,
733                                        raw_ostream &OS) const {
734  switch (TSFlags & X86II::SegOvrMask) {
735  default: assert(0 && "Invalid segment!");
736  case 0:
737    // No segment override, check for explicit one on memory operand.
738    if (MemOperand != -1) {   // If the instruction has a memory operand.
739      switch (MI.getOperand(MemOperand+X86::AddrSegmentReg).getReg()) {
740      default: assert(0 && "Unknown segment register!");
741      case 0: break;
742      case X86::CS: EmitByte(0x2E, CurByte, OS); break;
743      case X86::SS: EmitByte(0x36, CurByte, OS); break;
744      case X86::DS: EmitByte(0x3E, CurByte, OS); break;
745      case X86::ES: EmitByte(0x26, CurByte, OS); break;
746      case X86::FS: EmitByte(0x64, CurByte, OS); break;
747      case X86::GS: EmitByte(0x65, CurByte, OS); break;
748      }
749    }
750    break;
751  case X86II::FS:
752    EmitByte(0x64, CurByte, OS);
753    break;
754  case X86II::GS:
755    EmitByte(0x65, CurByte, OS);
756    break;
757  }
758}
759
760/// EmitOpcodePrefix - Emit all instruction prefixes prior to the opcode.
761///
762/// MemOperand is the operand # of the start of a memory operand if present.  If
763/// not present, it is -1.
764void X86MCCodeEmitter::EmitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
765                                        int MemOperand, const MCInst &MI,
766                                        const MCInstrDesc &Desc,
767                                        raw_ostream &OS) const {
768
769  // Emit the lock opcode prefix as needed.
770  if (TSFlags & X86II::LOCK)
771    EmitByte(0xF0, CurByte, OS);
772
773  // Emit segment override opcode prefix as needed.
774  EmitSegmentOverridePrefix(TSFlags, CurByte, MemOperand, MI, OS);
775
776  // Emit the repeat opcode prefix as needed.
777  if ((TSFlags & X86II::Op0Mask) == X86II::REP)
778    EmitByte(0xF3, CurByte, OS);
779
780  // Emit the address size opcode prefix as needed.
781  if ((TSFlags & X86II::AdSize) ||
782      (MemOperand != -1 && is64BitMode() && Is32BitMemOperand(MI, MemOperand)))
783    EmitByte(0x67, CurByte, OS);
784
785  // Emit the operand size opcode prefix as needed.
786  if (TSFlags & X86II::OpSize)
787    EmitByte(0x66, CurByte, OS);
788
789  bool Need0FPrefix = false;
790  switch (TSFlags & X86II::Op0Mask) {
791  default: assert(0 && "Invalid prefix!");
792  case 0: break;  // No prefix!
793  case X86II::REP: break; // already handled.
794  case X86II::TB:  // Two-byte opcode prefix
795  case X86II::T8:  // 0F 38
796  case X86II::TA:  // 0F 3A
797  case X86II::A6:  // 0F A6
798  case X86II::A7:  // 0F A7
799    Need0FPrefix = true;
800    break;
801  case X86II::TF: // F2 0F 38
802    EmitByte(0xF2, CurByte, OS);
803    Need0FPrefix = true;
804    break;
805  case X86II::XS:   // F3 0F
806    EmitByte(0xF3, CurByte, OS);
807    Need0FPrefix = true;
808    break;
809  case X86II::XD:   // F2 0F
810    EmitByte(0xF2, CurByte, OS);
811    Need0FPrefix = true;
812    break;
813  case X86II::D8: EmitByte(0xD8, CurByte, OS); break;
814  case X86II::D9: EmitByte(0xD9, CurByte, OS); break;
815  case X86II::DA: EmitByte(0xDA, CurByte, OS); break;
816  case X86II::DB: EmitByte(0xDB, CurByte, OS); break;
817  case X86II::DC: EmitByte(0xDC, CurByte, OS); break;
818  case X86II::DD: EmitByte(0xDD, CurByte, OS); break;
819  case X86II::DE: EmitByte(0xDE, CurByte, OS); break;
820  case X86II::DF: EmitByte(0xDF, CurByte, OS); break;
821  }
822
823  // Handle REX prefix.
824  // FIXME: Can this come before F2 etc to simplify emission?
825  if (is64BitMode()) {
826    if (unsigned REX = DetermineREXPrefix(MI, TSFlags, Desc))
827      EmitByte(0x40 | REX, CurByte, OS);
828  }
829
830  // 0x0F escape code must be emitted just before the opcode.
831  if (Need0FPrefix)
832    EmitByte(0x0F, CurByte, OS);
833
834  // FIXME: Pull this up into previous switch if REX can be moved earlier.
835  switch (TSFlags & X86II::Op0Mask) {
836  case X86II::TF:    // F2 0F 38
837  case X86II::T8:    // 0F 38
838    EmitByte(0x38, CurByte, OS);
839    break;
840  case X86II::TA:    // 0F 3A
841    EmitByte(0x3A, CurByte, OS);
842    break;
843  case X86II::A6:    // 0F A6
844    EmitByte(0xA6, CurByte, OS);
845    break;
846  case X86II::A7:    // 0F A7
847    EmitByte(0xA7, CurByte, OS);
848    break;
849  }
850}
851
852void X86MCCodeEmitter::
853EncodeInstruction(const MCInst &MI, raw_ostream &OS,
854                  SmallVectorImpl<MCFixup> &Fixups) const {
855  unsigned Opcode = MI.getOpcode();
856  const MCInstrDesc &Desc = MCII.get(Opcode);
857  uint64_t TSFlags = Desc.TSFlags;
858
859  // Pseudo instructions don't get encoded.
860  if ((TSFlags & X86II::FormMask) == X86II::Pseudo)
861    return;
862
863  // If this is a two-address instruction, skip one of the register operands.
864  // FIXME: This should be handled during MCInst lowering.
865  unsigned NumOps = Desc.getNumOperands();
866  unsigned CurOp = 0;
867  if (NumOps > 1 && Desc.getOperandConstraint(1, MCOI::TIED_TO) != -1)
868    ++CurOp;
869  else if (NumOps > 2 && Desc.getOperandConstraint(NumOps-1, MCOI::TIED_TO)== 0)
870    // Skip the last source operand that is tied_to the dest reg. e.g. LXADD32
871    --NumOps;
872
873  // Keep track of the current byte being emitted.
874  unsigned CurByte = 0;
875
876  // Is this instruction encoded using the AVX VEX prefix?
877  bool HasVEXPrefix = false;
878
879  // Does it use the VEX.VVVV field?
880  bool HasVEX_4V = false;
881
882  if ((TSFlags >> X86II::VEXShift) & X86II::VEX)
883    HasVEXPrefix = true;
884  if ((TSFlags >> X86II::VEXShift) & X86II::VEX_4V)
885    HasVEX_4V = true;
886
887  // Determine where the memory operand starts, if present.
888  int MemoryOperand = X86II::getMemoryOperandNo(TSFlags, Opcode);
889  if (MemoryOperand != -1) MemoryOperand += CurOp;
890
891  if (!HasVEXPrefix)
892    EmitOpcodePrefix(TSFlags, CurByte, MemoryOperand, MI, Desc, OS);
893  else
894    EmitVEXOpcodePrefix(TSFlags, CurByte, MemoryOperand, MI, Desc, OS);
895
896  unsigned char BaseOpcode = X86II::getBaseOpcodeFor(TSFlags);
897
898  if ((TSFlags >> X86II::VEXShift) & X86II::Has3DNow0F0FOpcode)
899    BaseOpcode = 0x0F;   // Weird 3DNow! encoding.
900
901  // FIXME: BEXTR uses VEX.vvvv for Operand 3 instead of Operand 2
902  bool IsBEXTR = (Opcode == X86::BEXTR32rr || Opcode == X86::BEXTR32rm ||
903                  Opcode == X86::BEXTR64rr || Opcode == X86::BEXTR64rm);
904
905  unsigned SrcRegNum = 0;
906  switch (TSFlags & X86II::FormMask) {
907  case X86II::MRMInitReg:
908    assert(0 && "FIXME: Remove this form when the JIT moves to MCCodeEmitter!");
909  default: errs() << "FORM: " << (TSFlags & X86II::FormMask) << "\n";
910    assert(0 && "Unknown FormMask value in X86MCCodeEmitter!");
911  case X86II::Pseudo:
912    assert(0 && "Pseudo instruction shouldn't be emitted");
913  case X86II::RawFrm:
914    EmitByte(BaseOpcode, CurByte, OS);
915    break;
916  case X86II::RawFrmImm8:
917    EmitByte(BaseOpcode, CurByte, OS);
918    EmitImmediate(MI.getOperand(CurOp++),
919                  X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
920                  CurByte, OS, Fixups);
921    EmitImmediate(MI.getOperand(CurOp++), 1, FK_Data_1, CurByte, OS, Fixups);
922    break;
923  case X86II::RawFrmImm16:
924    EmitByte(BaseOpcode, CurByte, OS);
925    EmitImmediate(MI.getOperand(CurOp++),
926                  X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
927                  CurByte, OS, Fixups);
928    EmitImmediate(MI.getOperand(CurOp++), 2, FK_Data_2, CurByte, OS, Fixups);
929    break;
930
931  case X86II::AddRegFrm:
932    EmitByte(BaseOpcode + GetX86RegNum(MI.getOperand(CurOp++)), CurByte, OS);
933    break;
934
935  case X86II::MRMDestReg:
936    EmitByte(BaseOpcode, CurByte, OS);
937    EmitRegModRMByte(MI.getOperand(CurOp),
938                     GetX86RegNum(MI.getOperand(CurOp+1)), CurByte, OS);
939    CurOp += 2;
940    break;
941
942  case X86II::MRMDestMem:
943    EmitByte(BaseOpcode, CurByte, OS);
944    SrcRegNum = CurOp + X86::AddrNumOperands;
945
946    if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
947      SrcRegNum++;
948
949    EmitMemModRMByte(MI, CurOp,
950                     GetX86RegNum(MI.getOperand(SrcRegNum)),
951                     TSFlags, CurByte, OS, Fixups);
952    CurOp = SrcRegNum + 1;
953    break;
954
955  case X86II::MRMSrcReg:
956    EmitByte(BaseOpcode, CurByte, OS);
957    SrcRegNum = CurOp + 1;
958
959    if (HasVEX_4V && !IsBEXTR) // Skip 1st src (which is encoded in VEX_VVVV)
960      SrcRegNum++;
961
962    EmitRegModRMByte(MI.getOperand(SrcRegNum),
963                     GetX86RegNum(MI.getOperand(CurOp)), CurByte, OS);
964    CurOp = SrcRegNum + 1;
965    if (IsBEXTR)
966      ++CurOp;
967    break;
968
969  case X86II::MRMSrcMem: {
970    int AddrOperands = X86::AddrNumOperands;
971    unsigned FirstMemOp = CurOp+1;
972    if (HasVEX_4V && !IsBEXTR) {
973      ++AddrOperands;
974      ++FirstMemOp;  // Skip the register source (which is encoded in VEX_VVVV).
975    }
976
977    EmitByte(BaseOpcode, CurByte, OS);
978
979    EmitMemModRMByte(MI, FirstMemOp, GetX86RegNum(MI.getOperand(CurOp)),
980                     TSFlags, CurByte, OS, Fixups);
981    CurOp += AddrOperands + 1;
982    if (IsBEXTR)
983      ++CurOp;
984    break;
985  }
986
987  case X86II::MRM0r: case X86II::MRM1r:
988  case X86II::MRM2r: case X86II::MRM3r:
989  case X86II::MRM4r: case X86II::MRM5r:
990  case X86II::MRM6r: case X86II::MRM7r:
991    if (HasVEX_4V) // Skip the register dst (which is encoded in VEX_VVVV).
992      CurOp++;
993    EmitByte(BaseOpcode, CurByte, OS);
994    EmitRegModRMByte(MI.getOperand(CurOp++),
995                     (TSFlags & X86II::FormMask)-X86II::MRM0r,
996                     CurByte, OS);
997    break;
998  case X86II::MRM0m: case X86II::MRM1m:
999  case X86II::MRM2m: case X86II::MRM3m:
1000  case X86II::MRM4m: case X86II::MRM5m:
1001  case X86II::MRM6m: case X86II::MRM7m:
1002    if (HasVEX_4V) // Skip the register dst (which is encoded in VEX_VVVV).
1003      CurOp++;
1004    EmitByte(BaseOpcode, CurByte, OS);
1005    EmitMemModRMByte(MI, CurOp, (TSFlags & X86II::FormMask)-X86II::MRM0m,
1006                     TSFlags, CurByte, OS, Fixups);
1007    CurOp += X86::AddrNumOperands;
1008    break;
1009  case X86II::MRM_C1:
1010    EmitByte(BaseOpcode, CurByte, OS);
1011    EmitByte(0xC1, CurByte, OS);
1012    break;
1013  case X86II::MRM_C2:
1014    EmitByte(BaseOpcode, CurByte, OS);
1015    EmitByte(0xC2, CurByte, OS);
1016    break;
1017  case X86II::MRM_C3:
1018    EmitByte(BaseOpcode, CurByte, OS);
1019    EmitByte(0xC3, CurByte, OS);
1020    break;
1021  case X86II::MRM_C4:
1022    EmitByte(BaseOpcode, CurByte, OS);
1023    EmitByte(0xC4, CurByte, OS);
1024    break;
1025  case X86II::MRM_C8:
1026    EmitByte(BaseOpcode, CurByte, OS);
1027    EmitByte(0xC8, CurByte, OS);
1028    break;
1029  case X86II::MRM_C9:
1030    EmitByte(BaseOpcode, CurByte, OS);
1031    EmitByte(0xC9, CurByte, OS);
1032    break;
1033  case X86II::MRM_E8:
1034    EmitByte(BaseOpcode, CurByte, OS);
1035    EmitByte(0xE8, CurByte, OS);
1036    break;
1037  case X86II::MRM_F0:
1038    EmitByte(BaseOpcode, CurByte, OS);
1039    EmitByte(0xF0, CurByte, OS);
1040    break;
1041  case X86II::MRM_F8:
1042    EmitByte(BaseOpcode, CurByte, OS);
1043    EmitByte(0xF8, CurByte, OS);
1044    break;
1045  case X86II::MRM_F9:
1046    EmitByte(BaseOpcode, CurByte, OS);
1047    EmitByte(0xF9, CurByte, OS);
1048    break;
1049  case X86II::MRM_D0:
1050    EmitByte(BaseOpcode, CurByte, OS);
1051    EmitByte(0xD0, CurByte, OS);
1052    break;
1053  case X86II::MRM_D1:
1054    EmitByte(BaseOpcode, CurByte, OS);
1055    EmitByte(0xD1, CurByte, OS);
1056    break;
1057  }
1058
1059  // If there is a remaining operand, it must be a trailing immediate.  Emit it
1060  // according to the right size for the instruction.
1061  if (CurOp != NumOps) {
1062    // The last source register of a 4-operand instruction in AVX is encoded
1063    // in bits[7:4] of an immediate byte, and bits[3:0] are ignored.
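    // For example, in a four-operand form such as VBLENDVPS dst, src1, src2,
    // mask, it is the register number of 'mask' that is placed in imm8[7:4].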
1064    if ((TSFlags >> X86II::VEXShift) & X86II::VEX_I8IMM) {
1065      const MCOperand &MO = MI.getOperand(CurOp++);
1066      bool IsExtReg = X86II::isX86_64ExtendedReg(MO.getReg());
1067      unsigned RegNum = (IsExtReg ? (1 << 7) : 0);
1068      RegNum |= GetX86RegNum(MO) << 4;
1069      EmitImmediate(MCOperand::CreateImm(RegNum), 1, FK_Data_1, CurByte, OS,
1070                    Fixups);
1071    } else {
1072      unsigned FixupKind;
1073      // FIXME: Is there a better way to know that we need a signed relocation?
1074      if (MI.getOpcode() == X86::ADD64ri32 ||
1075          MI.getOpcode() == X86::MOV64ri32 ||
1076          MI.getOpcode() == X86::MOV64mi32 ||
1077          MI.getOpcode() == X86::PUSH64i32)
1078        FixupKind = X86::reloc_signed_4byte;
1079      else
1080        FixupKind = getImmFixupKind(TSFlags);
1081      EmitImmediate(MI.getOperand(CurOp++),
1082                    X86II::getSizeOfImm(TSFlags), MCFixupKind(FixupKind),
1083                    CurByte, OS, Fixups);
1084    }
1085  }
1086
1087  if ((TSFlags >> X86II::VEXShift) & X86II::Has3DNow0F0FOpcode)
1088    EmitByte(X86II::getBaseOpcodeFor(TSFlags), CurByte, OS);
1089
1090#ifndef NDEBUG
1091  // FIXME: Verify.
1092  if (/*!Desc.isVariadic() &&*/ CurOp != NumOps) {
1093    errs() << "Cannot encode all operands of: ";
1094    MI.dump();
1095    errs() << '\n';
1096    abort();
1097  }
1098#endif
1099}
1100