1
2/*---------------------------------------------------------------*/
3/*--- begin                                   host_x86_defs.h ---*/
4/*---------------------------------------------------------------*/
5
6/*
7   This file is part of Valgrind, a dynamic binary instrumentation
8   framework.
9
10   Copyright (C) 2004-2017 OpenWorks LLP
11      info@open-works.net
12
13   This program is free software; you can redistribute it and/or
14   modify it under the terms of the GNU General Public License as
15   published by the Free Software Foundation; either version 2 of the
16   License, or (at your option) any later version.
17
18   This program is distributed in the hope that it will be useful, but
19   WITHOUT ANY WARRANTY; without even the implied warranty of
20   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
21   General Public License for more details.
22
23   You should have received a copy of the GNU General Public License
24   along with this program; if not, write to the Free Software
25   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
26   02110-1301, USA.
27
28   The GNU General Public License is contained in the file COPYING.
29
30   Neither the names of the U.S. Department of Energy nor the
31   University of California nor the names of its contributors may be
32   used to endorse or promote products derived from this software
33   without prior written permission.
34*/
35
36#ifndef __VEX_HOST_X86_DEFS_H
37#define __VEX_HOST_X86_DEFS_H
38
39#include "libvex_basictypes.h"
40#include "libvex.h"                      // VexArch
41#include "host_generic_regs.h"           // HReg
42
43/* --------- Registers. --------- */
44
45/* The usual HReg abstraction.  There are 8 real int regs,
46   6 real float regs, and 8 real vector regs.
47*/
48
#define ST_IN static inline
/* mkHReg arguments are: (isVirtual, register class, hardware encoding,
   index in the register universe).  All registers here are real
   (isVirtual == False), and the universe indices (last argument) are
   unique and ascending -- presumably they must match the ordering used
   by getRRegUniverse_X86; confirm in host_x86_defs.c. */
ST_IN HReg hregX86_EAX   ( void ) { return mkHReg(False, HRcInt32,  0,  0); }
ST_IN HReg hregX86_EBX   ( void ) { return mkHReg(False, HRcInt32,  3,  1); }
ST_IN HReg hregX86_ECX   ( void ) { return mkHReg(False, HRcInt32,  1,  2); }
ST_IN HReg hregX86_EDX   ( void ) { return mkHReg(False, HRcInt32,  2,  3); }
ST_IN HReg hregX86_ESI   ( void ) { return mkHReg(False, HRcInt32,  6,  4); }
ST_IN HReg hregX86_EDI   ( void ) { return mkHReg(False, HRcInt32,  7,  5); }

/* Six "fake" FP registers, class HRcFlt64.  NOTE(review): the "FAKE"
   naming and the "FP fake" instruction forms below suggest the x87
   stack is presented to the allocator as a flat 6-entry register
   file, with the stack mapping done at emit time -- confirm in the
   instruction selector/emitter. */
ST_IN HReg hregX86_FAKE0 ( void ) { return mkHReg(False, HRcFlt64,  0,  6); }
ST_IN HReg hregX86_FAKE1 ( void ) { return mkHReg(False, HRcFlt64,  1,  7); }
ST_IN HReg hregX86_FAKE2 ( void ) { return mkHReg(False, HRcFlt64,  2,  8); }
ST_IN HReg hregX86_FAKE3 ( void ) { return mkHReg(False, HRcFlt64,  3,  9); }
ST_IN HReg hregX86_FAKE4 ( void ) { return mkHReg(False, HRcFlt64,  4, 10); }
ST_IN HReg hregX86_FAKE5 ( void ) { return mkHReg(False, HRcFlt64,  5, 11); }

ST_IN HReg hregX86_XMM0  ( void ) { return mkHReg(False, HRcVec128, 0, 12); }
ST_IN HReg hregX86_XMM1  ( void ) { return mkHReg(False, HRcVec128, 1, 13); }
ST_IN HReg hregX86_XMM2  ( void ) { return mkHReg(False, HRcVec128, 2, 14); }
ST_IN HReg hregX86_XMM3  ( void ) { return mkHReg(False, HRcVec128, 3, 15); }
ST_IN HReg hregX86_XMM4  ( void ) { return mkHReg(False, HRcVec128, 4, 16); }
ST_IN HReg hregX86_XMM5  ( void ) { return mkHReg(False, HRcVec128, 5, 17); }
ST_IN HReg hregX86_XMM6  ( void ) { return mkHReg(False, HRcVec128, 6, 18); }
ST_IN HReg hregX86_XMM7  ( void ) { return mkHReg(False, HRcVec128, 7, 19); }

/* ESP and EBP come after the six allocatable int regs; presumably
   they are reserved (stack pointer / guest-state pointer) and not
   handed to the register allocator -- confirm in
   getRRegUniverse_X86. */
ST_IN HReg hregX86_ESP   ( void ) { return mkHReg(False, HRcInt32,  4, 20); }
ST_IN HReg hregX86_EBP   ( void ) { return mkHReg(False, HRcInt32,  5, 21); }
#undef ST_IN

/* Print the name of an x86 host register (for debugging). */
extern void ppHRegX86 ( HReg );
78
79
/* --------- Condition codes, Intel encoding. --------- */

/* The numeric values are the 4-bit condition encodings used by the
   Intel Jcc/SETcc/CMOVcc instruction families, so they can be placed
   directly into generated opcode bytes.  Xcc_ALWAYS (16) lies outside
   the real encoding space and means "unconditional". */
typedef
   enum {
      Xcc_O      = 0,  /* overflow           */
      Xcc_NO     = 1,  /* no overflow        */

      Xcc_B      = 2,  /* below              */
      Xcc_NB     = 3,  /* not below          */

      Xcc_Z      = 4,  /* zero               */
      Xcc_NZ     = 5,  /* not zero           */

      Xcc_BE     = 6,  /* below or equal     */
      Xcc_NBE    = 7,  /* not below or equal */

      Xcc_S      = 8,  /* negative           */
      Xcc_NS     = 9,  /* not negative       */

      Xcc_P      = 10, /* parity even        */
      Xcc_NP     = 11, /* not parity even    */

      Xcc_L      = 12, /* jump less          */
      Xcc_NL     = 13, /* not less           */

      Xcc_LE     = 14, /* less or equal      */
      Xcc_NLE    = 15, /* not less or equal  */

      Xcc_ALWAYS = 16  /* the usual hack     */
   }
   X86CondCode;

/* Printable name of a condition code. */
extern const HChar* showX86CondCode ( X86CondCode );
113
114
/* --------- Memory address expressions (amodes). --------- */

typedef
   enum {
     Xam_IR,        /* Immediate + Reg */
     Xam_IRRS       /* Immediate + Reg1 + (Reg2 << Shift) */
   }
   X86AModeTag;

/* Tagged union describing an x86 effective address:
   either disp(reg) or disp(base,index,2^shift). */
typedef
   struct {
      X86AModeTag tag;
      union {
         struct {
            UInt imm;
            HReg reg;
         } IR;
         struct {
            UInt imm;
            HReg base;
            HReg index;
            Int  shift; /* 0, 1, 2 or 3 only */
         } IRRS;
      } Xam;
   }
   X86AMode;

/* Constructors, one per tag. */
extern X86AMode* X86AMode_IR   ( UInt, HReg );
extern X86AMode* X86AMode_IRRS ( UInt, HReg, HReg, Int );

/* "dopy" = duplicate: make a copy of the given amode. */
extern X86AMode* dopyX86AMode ( X86AMode* );

/* Print an amode (for debugging). */
extern void ppX86AMode ( X86AMode* );
148
149
/* --------- Operand, which can be reg, immediate or memory. --------- */

typedef
   enum {
      Xrmi_Imm,
      Xrmi_Reg,
      Xrmi_Mem
   }
   X86RMITag;

/* Tagged union: a reg/mem/imm source operand, as accepted by most
   x86 ALU instructions. */
typedef
   struct {
      X86RMITag tag;
      union {
         struct {
            UInt imm32;
         } Imm;
         struct {
            HReg reg;
         } Reg;
         struct {
            X86AMode* am;
         } Mem;
      }
      Xrmi;
   }
   X86RMI;

/* Constructors, one per tag. */
extern X86RMI* X86RMI_Imm ( UInt );
extern X86RMI* X86RMI_Reg ( HReg );
extern X86RMI* X86RMI_Mem ( X86AMode* );

/* Print (for debugging). */
extern void ppX86RMI ( X86RMI* );
183
184
/* --------- Operand, which can be reg or immediate only. --------- */

typedef
   enum {
      Xri_Imm,
      Xri_Reg
   }
   X86RITag;

/* Tagged union: a reg-or-immediate operand. */
typedef
   struct {
      X86RITag tag;
      union {
         struct {
            UInt imm32;
         } Imm;
         struct {
            HReg reg;
         } Reg;
      }
      Xri;
   }
   X86RI;

/* Constructors, one per tag. */
extern X86RI* X86RI_Imm ( UInt );
extern X86RI* X86RI_Reg ( HReg );

/* Print (for debugging). */
extern void ppX86RI ( X86RI* );
213
214
/* --------- Operand, which can be reg or memory only. --------- */

typedef
   enum {
      Xrm_Reg,
      Xrm_Mem
   }
   X86RMTag;

/* Tagged union: a reg-or-memory operand. */
typedef
   struct {
      X86RMTag tag;
      union {
         struct {
            HReg reg;
         } Reg;
         struct {
            X86AMode* am;
         } Mem;
      }
      Xrm;
   }
   X86RM;

/* Constructors, one per tag. */
extern X86RM* X86RM_Reg ( HReg );
extern X86RM* X86RM_Mem ( X86AMode* );

/* Print (for debugging). */
extern void ppX86RM ( X86RM* );
243
244
/* --------- Instructions. --------- */

/* --------- */
/* Integer unary ops: two's-complement negate and bitwise not. */
typedef
   enum {
      Xun_NEG,
      Xun_NOT
   }
   X86UnaryOp;

extern const HChar* showX86UnaryOp ( X86UnaryOp );


/* --------- */
/* Integer ALU ops, including plain move and compare. */
typedef
   enum {
      Xalu_INVALID,
      Xalu_MOV,
      Xalu_CMP,
      Xalu_ADD, Xalu_SUB, Xalu_ADC, Xalu_SBB,
      Xalu_AND, Xalu_OR, Xalu_XOR,
      Xalu_MUL
   }
   X86AluOp;

extern const HChar* showX86AluOp ( X86AluOp );


/* --------- */
/* Shift ops: logical left/right and arithmetic right. */
typedef
   enum {
      Xsh_INVALID,
      Xsh_SHL, Xsh_SHR, Xsh_SAR
   }
   X86ShiftOp;

extern const HChar* showX86ShiftOp ( X86ShiftOp );


/* --------- */
/* x87-style FP ops, used by the "fake" flat-register FP insns below. */
typedef
   enum {
      Xfp_INVALID,
      /* Binary */
      Xfp_ADD, Xfp_SUB, Xfp_MUL, Xfp_DIV,
      Xfp_SCALE, Xfp_ATAN, Xfp_YL2X, Xfp_YL2XP1, Xfp_PREM, Xfp_PREM1,
      /* Unary */
      Xfp_SQRT, Xfp_ABS, Xfp_NEG, Xfp_MOV, Xfp_SIN, Xfp_COS, Xfp_TAN,
      Xfp_ROUND, Xfp_2XM1
   }
   X86FpOp;

extern const HChar* showX86FpOp ( X86FpOp );
298
299
300/* --------- */
301typedef
302   enum {
303      Xsse_INVALID,
304      /* mov */
305      Xsse_MOV,
306      /* Floating point binary */
307      Xsse_ADDF, Xsse_SUBF, Xsse_MULF, Xsse_DIVF,
308      Xsse_MAXF, Xsse_MINF,
309      Xsse_CMPEQF, Xsse_CMPLTF, Xsse_CMPLEF, Xsse_CMPUNF,
310      /* Floating point unary */
311      Xsse_RCPF, Xsse_RSQRTF, Xsse_SQRTF,
312      /* Bitwise */
313      Xsse_AND, Xsse_OR, Xsse_XOR, Xsse_ANDN,
314      /* Integer binary */
315      Xsse_ADD8,   Xsse_ADD16,   Xsse_ADD32,   Xsse_ADD64,
316      Xsse_QADD8U, Xsse_QADD16U,
317      Xsse_QADD8S, Xsse_QADD16S,
318      Xsse_SUB8,   Xsse_SUB16,   Xsse_SUB32,   Xsse_SUB64,
319      Xsse_QSUB8U, Xsse_QSUB16U,
320      Xsse_QSUB8S, Xsse_QSUB16S,
321      Xsse_MUL16,
322      Xsse_MULHI16U,
323      Xsse_MULHI16S,
324      Xsse_AVG8U, Xsse_AVG16U,
325      Xsse_MAX16S,
326      Xsse_MAX8U,
327      Xsse_MIN16S,
328      Xsse_MIN8U,
329      Xsse_CMPEQ8,  Xsse_CMPEQ16,  Xsse_CMPEQ32,
330      Xsse_CMPGT8S, Xsse_CMPGT16S, Xsse_CMPGT32S,
331      Xsse_SHL16, Xsse_SHL32, Xsse_SHL64,
332      Xsse_SHR16, Xsse_SHR32, Xsse_SHR64,
333      Xsse_SAR16, Xsse_SAR32,
334      Xsse_PACKSSD, Xsse_PACKSSW, Xsse_PACKUSW,
335      Xsse_UNPCKHB, Xsse_UNPCKHW, Xsse_UNPCKHD, Xsse_UNPCKHQ,
336      Xsse_UNPCKLB, Xsse_UNPCKLW, Xsse_UNPCKLD, Xsse_UNPCKLQ
337   }
338   X86SseOp;
339
340extern const HChar* showX86SseOp ( X86SseOp );
341
342
343/* --------- */
344typedef
345   enum {
346      Xin_Alu32R,    /* 32-bit mov/arith/logical, dst=REG */
347      Xin_Alu32M,    /* 32-bit mov/arith/logical, dst=MEM */
348      Xin_Sh32,      /* 32-bit shift/rotate, dst=REG */
349      Xin_Test32,    /* 32-bit test of REG or MEM against imm32 (AND, set
350                        flags, discard result) */
351      Xin_Unary32,   /* 32-bit not and neg */
352      Xin_Lea32,     /* 32-bit compute EA into a reg */
353      Xin_MulL,      /* 32 x 32 -> 64 multiply */
354      Xin_Div,       /* 64/32 -> (32,32) div and mod */
355      Xin_Sh3232,    /* shldl or shrdl */
356      Xin_Push,      /* push (32-bit?) value on stack */
357      Xin_Call,      /* call to address in register */
358      Xin_XDirect,   /* direct transfer to GA */
359      Xin_XIndir,    /* indirect transfer to GA */
360      Xin_XAssisted, /* assisted transfer to GA */
361      Xin_CMov32,    /* conditional move */
362      Xin_LoadEX,    /* mov{s,z}{b,w}l from mem to reg */
363      Xin_Store,     /* store 16/8 bit value in memory */
364      Xin_Set32,     /* convert condition code to 32-bit value */
365      Xin_Bsfr32,    /* 32-bit bsf/bsr */
366      Xin_MFence,    /* mem fence (not just sse2, but sse0 and 1/mmxext too) */
367      Xin_ACAS,      /* 8/16/32-bit lock;cmpxchg */
368      Xin_DACAS,     /* lock;cmpxchg8b (doubleword ACAS, 2 x 32-bit only) */
369
370      Xin_FpUnary,   /* FP fake unary op */
371      Xin_FpBinary,  /* FP fake binary op */
372      Xin_FpLdSt,    /* FP fake load/store */
373      Xin_FpLdStI,   /* FP fake load/store, converting to/from Int */
374      Xin_Fp64to32,  /* FP round IEEE754 double to IEEE754 single */
375      Xin_FpCMov,    /* FP fake floating point conditional move */
376      Xin_FpLdCW,    /* fldcw */
377      Xin_FpStSW_AX, /* fstsw %ax */
378      Xin_FpCmp,     /* FP compare, generating a C320 value into int reg */
379
380      Xin_SseConst,  /* Generate restricted SSE literal */
381      Xin_SseLdSt,   /* SSE load/store, no alignment constraints */
382      Xin_SseLdzLO,  /* SSE load low 32/64 bits, zero remainder of reg */
383      Xin_Sse32Fx4,  /* SSE binary, 32Fx4 */
384      Xin_Sse32FLo,  /* SSE binary, 32F in lowest lane only */
385      Xin_Sse64Fx2,  /* SSE binary, 64Fx2 */
386      Xin_Sse64FLo,  /* SSE binary, 64F in lowest lane only */
387      Xin_SseReRg,   /* SSE binary general reg-reg, Re, Rg */
388      Xin_SseCMov,   /* SSE conditional move */
389      Xin_SseShuf,   /* SSE2 shuffle (pshufd) */
390      Xin_EvCheck,   /* Event check */
391      Xin_ProfInc    /* 64-bit profile counter increment */
392   }
393   X86InstrTag;
394
/* Destinations are on the RIGHT (second operand) */

/* Tagged union holding the payload for every instruction form.  See
   X86InstrTag for a one-line summary of each. */
typedef
   struct {
      X86InstrTag tag;
      union {
         /* 32-bit ALU op with register destination: dst = dst <op> src. */
         struct {
            X86AluOp op;
            X86RMI*  src;
            HReg     dst;
         } Alu32R;
         /* Memory-destination version: dst is an amode. */
         struct {
            X86AluOp  op;
            X86RI*    src;
            X86AMode* dst;
         } Alu32M;
         struct {
            X86ShiftOp op;
            UInt  src;  /* shift amount, or 0 means %cl */
            HReg  dst;
         } Sh32;
         struct {
            UInt   imm32;
            X86RM* dst; /* not written, only read */
         } Test32;
         /* Not and Neg */
         struct {
            X86UnaryOp op;
            HReg       dst;
         } Unary32;
         /* 32-bit compute EA into a reg */
         struct {
            X86AMode* am;
            HReg      dst;
         } Lea32;
         /* EDX:EAX = EAX *s/u r/m32 */
         struct {
            Bool   syned;
            X86RM* src;
         } MulL;
         /* x86 div/idiv instruction.  Modifies EDX and EAX and reads src. */
         struct {
            Bool   syned;
            X86RM* src;
         } Div;
         /* shld/shrd.  op may only be Xsh_SHL or Xsh_SHR */
         struct {
            X86ShiftOp op;
            UInt       amt;   /* shift amount, or 0 means %cl */
            HReg       src;
            HReg       dst;
         } Sh3232;
         /* pushl of the given reg/mem/imm operand. */
         struct {
            X86RMI* src;
         } Push;
         /* Pseudo-insn.  Call target (an absolute address), on given
            condition (which could be Xcc_ALWAYS). */
         struct {
            X86CondCode cond;
            Addr32      target;
            Int         regparms; /* 0 .. 3 */
            RetLoc      rloc;     /* where the return value will be */
         } Call;
         /* Update the guest EIP value, then exit requesting to chain
            to it.  May be conditional.  Urr, use of Addr32 implicitly
            assumes that wordsize(guest) == wordsize(host). */
         struct {
            Addr32      dstGA;    /* next guest address */
            X86AMode*   amEIP;    /* amode in guest state for EIP */
            X86CondCode cond;     /* can be Xcc_ALWAYS */
            Bool        toFastEP; /* chain to the slow or fast point? */
         } XDirect;
         /* Boring transfer to a guest address not known at JIT time.
            Not chainable.  May be conditional. */
         struct {
            HReg        dstGA;
            X86AMode*   amEIP;
            X86CondCode cond; /* can be Xcc_ALWAYS */
         } XIndir;
         /* Assisted transfer to a guest address, most general case.
            Not chainable.  May be conditional. */
         struct {
            HReg        dstGA;
            X86AMode*   amEIP;
            X86CondCode cond; /* can be Xcc_ALWAYS */
            IRJumpKind  jk;
         } XAssisted;
         /* Mov src to dst on the given condition, which may not
            be the bogus Xcc_ALWAYS. */
         struct {
            X86CondCode cond;
            X86RM*      src;
            HReg        dst;
         } CMov32;
         /* Sign/Zero extending loads.  Dst size is always 32 bits. */
         struct {
            UChar     szSmall;
            Bool      syned;
            X86AMode* src;
            HReg      dst;
         } LoadEX;
         /* 16/8 bit stores, which are troublesome (particularly
            8-bit) */
         struct {
            UChar     sz; /* only 1 or 2 */
            HReg      src;
            X86AMode* dst;
         } Store;
         /* Convert a x86 condition code to a 32-bit value (0 or 1). */
         struct {
            X86CondCode cond;
            HReg        dst;
         } Set32;
         /* 32-bit bsf or bsr. */
         struct {
            Bool isFwds;
            HReg src;
            HReg dst;
         } Bsfr32;
         /* Mem fence (not just sse2, but sse0 and sse1/mmxext too).
            In short, an insn which flushes all preceding loads and
            stores as much as possible before continuing.  On SSE2
            we emit a real "mfence", on SSE1 or the MMXEXT subset
            "sfence ; lock addl $0,0(%esp)" and on SSE0
            "lock addl $0,0(%esp)".  This insn therefore carries the
            host's hwcaps so the assembler knows what to emit. */
         struct {
            UInt hwcaps; /* host's hwcaps; selects which fence sequence
                            to emit (see comment above) */
         } MFence;
         /* "lock;cmpxchg": mem address in .addr,
             expected value in %eax, new value in %ebx */
         struct {
            X86AMode* addr;
            UChar     sz; /* 1, 2 or 4 */
         } ACAS;
         /* "lock;cmpxchg8b": mem address in .addr, expected value in
            %edx:%eax, new value in %ecx:%ebx */
         struct {
            X86AMode* addr;
         } DACAS;

         /* X86 Floating point (fake 3-operand, "flat reg file" insns) */
         struct {
            X86FpOp op;
            HReg    src;
            HReg    dst;
         } FpUnary;
         struct {
            X86FpOp op;
            HReg    srcL;
            HReg    srcR;
            HReg    dst;
         } FpBinary;
         struct {
            Bool      isLoad;
            UChar     sz; /* only 4 (IEEE single) or 8 (IEEE double) */
            HReg      reg;
            X86AMode* addr;
         } FpLdSt;
         /* Move 64-bit float to/from memory, converting to/from
            signed int on the way.  Note the conversions will observe
            the host FPU rounding mode currently in force. */
         struct {
            Bool      isLoad;
            UChar     sz; /* only 2, 4 or 8 */
            HReg      reg;
            X86AMode* addr;
         } FpLdStI;
         /* By observing the current FPU rounding mode, round (etc)
            src into dst given that dst should be interpreted as an
            IEEE754 32-bit (float) type. */
         struct {
            HReg src;
            HReg dst;
         } Fp64to32;
         /* Mov src to dst on the given condition, which may not
            be the bogus Xcc_ALWAYS. */
         struct {
            X86CondCode cond;
            HReg        src;
            HReg        dst;
         } FpCMov;
         /* Load the FPU's 16-bit control word (fldcw) */
         struct {
            X86AMode* addr;
         }
         FpLdCW;
         /* fstsw %ax */
         struct {
            /* no fields */
         }
         FpStSW_AX;
         /* Do a compare, generating the C320 bits into the dst. */
         struct {
            HReg    srcL;
            HReg    srcR;
            HReg    dst;
         } FpCmp;

         /* Simplistic SSE[123] */
         struct {
            UShort  con; /* restricted literal (see Xin_SseConst) */
            HReg    dst;
         } SseConst;
         struct {
            Bool      isLoad;
            HReg      reg;
            X86AMode* addr;
         } SseLdSt;
         struct {
            UChar     sz; /* 4 or 8 only */
            HReg      reg;
            X86AMode* addr;
         } SseLdzLO;
         struct {
            X86SseOp op;
            HReg     src;
            HReg     dst;
         } Sse32Fx4;
         struct {
            X86SseOp op;
            HReg     src;
            HReg     dst;
         } Sse32FLo;
         struct {
            X86SseOp op;
            HReg     src;
            HReg     dst;
         } Sse64Fx2;
         struct {
            X86SseOp op;
            HReg     src;
            HReg     dst;
         } Sse64FLo;
         struct {
            X86SseOp op;
            HReg     src;
            HReg     dst;
         } SseReRg;
         /* Mov src to dst on the given condition, which may not
            be the bogus Xcc_ALWAYS. */
         struct {
            X86CondCode cond;
            HReg        src;
            HReg        dst;
         } SseCMov;
         struct {
            Int    order; /* 0 <= order <= 0xFF */
            HReg   src;
            HReg   dst;
         } SseShuf;
         struct {
            X86AMode* amCounter;
            X86AMode* amFailAddr;
         } EvCheck;
         struct {
            /* No fields.  The address of the counter to inc is
               installed later, post-translation, by patching it in,
               as it is not known at translation time. */
         } ProfInc;

      } Xin;
   }
   X86Instr;
659
/* Constructors, one per instruction form.  Operand meanings match the
   corresponding members of the X86Instr.Xin union above. */
extern X86Instr* X86Instr_Alu32R    ( X86AluOp, X86RMI*, HReg );
extern X86Instr* X86Instr_Alu32M    ( X86AluOp, X86RI*,  X86AMode* );
extern X86Instr* X86Instr_Unary32   ( X86UnaryOp op, HReg dst );
extern X86Instr* X86Instr_Lea32     ( X86AMode* am, HReg dst );

extern X86Instr* X86Instr_Sh32      ( X86ShiftOp, UInt, HReg );
extern X86Instr* X86Instr_Test32    ( UInt imm32, X86RM* dst );
extern X86Instr* X86Instr_MulL      ( Bool syned, X86RM* );
extern X86Instr* X86Instr_Div       ( Bool syned, X86RM* );
extern X86Instr* X86Instr_Sh3232    ( X86ShiftOp, UInt amt, HReg src, HReg dst );
extern X86Instr* X86Instr_Push      ( X86RMI* );
extern X86Instr* X86Instr_Call      ( X86CondCode, Addr32, Int, RetLoc );
extern X86Instr* X86Instr_XDirect   ( Addr32 dstGA, X86AMode* amEIP,
                                      X86CondCode cond, Bool toFastEP );
extern X86Instr* X86Instr_XIndir    ( HReg dstGA, X86AMode* amEIP,
                                      X86CondCode cond );
extern X86Instr* X86Instr_XAssisted ( HReg dstGA, X86AMode* amEIP,
                                      X86CondCode cond, IRJumpKind jk );
extern X86Instr* X86Instr_CMov32    ( X86CondCode, X86RM* src, HReg dst );
extern X86Instr* X86Instr_LoadEX    ( UChar szSmall, Bool syned,
                                      X86AMode* src, HReg dst );
extern X86Instr* X86Instr_Store     ( UChar sz, HReg src, X86AMode* dst );
extern X86Instr* X86Instr_Set32     ( X86CondCode cond, HReg dst );
extern X86Instr* X86Instr_Bsfr32    ( Bool isFwds, HReg src, HReg dst );
extern X86Instr* X86Instr_MFence    ( UInt hwcaps );
extern X86Instr* X86Instr_ACAS      ( X86AMode* addr, UChar sz );
extern X86Instr* X86Instr_DACAS     ( X86AMode* addr );

extern X86Instr* X86Instr_FpUnary   ( X86FpOp op, HReg src, HReg dst );
extern X86Instr* X86Instr_FpBinary  ( X86FpOp op, HReg srcL, HReg srcR, HReg dst );
extern X86Instr* X86Instr_FpLdSt    ( Bool isLoad, UChar sz, HReg reg, X86AMode* );
extern X86Instr* X86Instr_FpLdStI   ( Bool isLoad, UChar sz, HReg reg, X86AMode* );
extern X86Instr* X86Instr_Fp64to32  ( HReg src, HReg dst );
extern X86Instr* X86Instr_FpCMov    ( X86CondCode, HReg src, HReg dst );
extern X86Instr* X86Instr_FpLdCW    ( X86AMode* );
extern X86Instr* X86Instr_FpStSW_AX ( void );
extern X86Instr* X86Instr_FpCmp     ( HReg srcL, HReg srcR, HReg dst );

extern X86Instr* X86Instr_SseConst  ( UShort con, HReg dst );
extern X86Instr* X86Instr_SseLdSt   ( Bool isLoad, HReg, X86AMode* );
extern X86Instr* X86Instr_SseLdzLO  ( Int sz, HReg, X86AMode* );
extern X86Instr* X86Instr_Sse32Fx4  ( X86SseOp, HReg, HReg );
extern X86Instr* X86Instr_Sse32FLo  ( X86SseOp, HReg, HReg );
extern X86Instr* X86Instr_Sse64Fx2  ( X86SseOp, HReg, HReg );
extern X86Instr* X86Instr_Sse64FLo  ( X86SseOp, HReg, HReg );
extern X86Instr* X86Instr_SseReRg   ( X86SseOp, HReg, HReg );
extern X86Instr* X86Instr_SseCMov   ( X86CondCode, HReg src, HReg dst );
extern X86Instr* X86Instr_SseShuf   ( Int order, HReg src, HReg dst );
extern X86Instr* X86Instr_EvCheck   ( X86AMode* amCounter,
                                      X86AMode* amFailAddr );
extern X86Instr* X86Instr_ProfInc   ( void );
711
712
/* Pretty-print an instruction.  NOTE(review): the trailing Bool is
   presumably mode64, matching emit_X86Instr below -- confirm in
   host_x86_defs.c. */
extern void ppX86Instr ( const X86Instr*, Bool );

/* Some functions that insulate the register allocator from details
   of the underlying instruction set. */
extern void         getRegUsage_X86Instr ( HRegUsage*, const X86Instr*, Bool );
extern void         mapRegs_X86Instr     ( HRegRemap*, X86Instr*, Bool );
/* Recognise register-to-register moves, reporting src/dst through the
   out-params (standard regalloc coalescing hook). */
extern Bool         isMove_X86Instr      ( const X86Instr*, HReg*, HReg* );
/* Assemble one instruction into buf (at most nbuf bytes); the Int
   result is presumably the number of bytes emitted.  *is_profInc is
   a MB_MOD (maybe-modified) flag, presumably set when the insn is a
   ProfInc so its location can be recorded for patchProfInc_X86.  The
   disp_cp_* pointers are the dispatcher entry points needed to code
   the XDirect/XIndir/XAssisted forms. */
extern Int          emit_X86Instr   ( /*MB_MOD*/Bool* is_profInc,
                                      UChar* buf, Int nbuf, const X86Instr* i,
                                      Bool mode64,
                                      VexEndness endness_host,
                                      const void* disp_cp_chain_me_to_slowEP,
                                      const void* disp_cp_chain_me_to_fastEP,
                                      const void* disp_cp_xindir,
                                      const void* disp_cp_xassisted );

/* Generate up to two instructions (returned via i1/i2) to spill or
   reload rreg at the given spill-slot offset. */
extern void genSpill_X86  ( /*OUT*/HInstr** i1, /*OUT*/HInstr** i2,
                            HReg rreg, Int offset, Bool );
extern void genReload_X86 ( /*OUT*/HInstr** i1, /*OUT*/HInstr** i2,
                            HReg rreg, Int offset, Bool );

/* Try to fold a reload of vreg (from spill_off) directly into i.
   NOTE(review): the result when no folding is possible (NULL vs. i
   unchanged) isn't visible from this header -- check the .c file. */
extern X86Instr* directReload_X86 ( X86Instr* i, HReg vreg, Short spill_off );

/* The set of real registers available on this target. */
extern const RRegUniverse* getRRegUniverse_X86 ( void );

/* Instruction selection: translate an IR superblock into x86 code. */
extern HInstrArray* iselSB_X86           ( const IRSB*,
                                           VexArch,
                                           const VexArchInfo*,
                                           const VexAbiInfo*,
                                           Int offs_Host_EvC_Counter,
                                           Int offs_Host_EvC_FailAddr,
                                           Bool chainingAllowed,
                                           Bool addProfInc,
                                           Addr max_ga );

/* How big is an event check?  This is kind of a kludge because it
   depends on the offsets of host_EvC_FAILADDR and host_EvC_COUNTER,
   and so assumes that they are both <= 128, and so can use the short
   offset encoding.  This is all checked with assertions, so in the
   worst case we will merely assert at startup. */
extern Int evCheckSzB_X86 (void);

/* Perform a chaining and unchaining of an XDirect jump. */
extern VexInvalRange chainXDirect_X86 ( VexEndness endness_host,
                                        void* place_to_chain,
                                        const void* disp_cp_chain_me_EXPECTED,
                                        const void* place_to_jump_to );

extern VexInvalRange unchainXDirect_X86 ( VexEndness endness_host,
                                          void* place_to_unchain,
                                          const void* place_to_jump_to_EXPECTED,
                                          const void* disp_cp_chain_me );

/* Patch the counter location into an existing ProfInc point. */
extern VexInvalRange patchProfInc_X86 ( VexEndness endness_host,
                                        void*  place_to_patch,
                                        const ULong* location_of_counter );
770
771
772#endif /* ndef __VEX_HOST_X86_DEFS_H */
773
774/*---------------------------------------------------------------*/
775/*--- end                                     host_x86_defs.h ---*/
776/*---------------------------------------------------------------*/
777