/*---------------------------------------------------------------*/
/*--- begin                                   host_x86_defs.h ---*/
/*---------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2004-2013 OpenWorks LLP
      info@open-works.net

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
   02110-1301, USA.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/

#ifndef __VEX_HOST_X86_DEFS_H
#define __VEX_HOST_X86_DEFS_H

#include "libvex_basictypes.h"
#include "libvex.h"                      // VexArch
#include "host_generic_regs.h"           // HReg

/* --------- Registers. --------- */

/* The usual HReg abstraction.  There are 8 real int regs,
   6 real float regs, and 8 real vector regs.
*/

extern void ppHRegX86 ( HReg );

extern HReg hregX86_EAX ( void );
extern HReg hregX86_EBX ( void );
extern HReg hregX86_ECX ( void );
extern HReg hregX86_EDX ( void );
extern HReg hregX86_ESP ( void );
extern HReg hregX86_EBP ( void );
extern HReg hregX86_ESI ( void );
extern HReg hregX86_EDI ( void );

extern HReg hregX86_FAKE0 ( void );
extern HReg hregX86_FAKE1 ( void );
extern HReg hregX86_FAKE2 ( void );
extern HReg hregX86_FAKE3 ( void );
extern HReg hregX86_FAKE4 ( void );
extern HReg hregX86_FAKE5 ( void );

extern HReg hregX86_XMM0 ( void );
extern HReg hregX86_XMM1 ( void );
extern HReg hregX86_XMM2 ( void );
extern HReg hregX86_XMM3 ( void );
extern HReg hregX86_XMM4 ( void );
extern HReg hregX86_XMM5 ( void );
extern HReg hregX86_XMM6 ( void );
extern HReg hregX86_XMM7 ( void );
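
/* Illustrative sketch, not part of the interface: the getters above
   return the fixed real registers, which can be pretty-printed with
   ppHRegX86, e.g.

      HReg eax = hregX86_EAX();
      ppHRegX86(eax);   // prints the register's name

   Virtual registers used during instruction selection are created
   separately and are mapped onto these real registers by the register
   allocator. */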


/* --------- Condition codes, Intel encoding. --------- */

typedef
   enum {
      Xcc_O      = 0,  /* overflow           */
      Xcc_NO     = 1,  /* no overflow        */

      Xcc_B      = 2,  /* below              */
      Xcc_NB     = 3,  /* not below          */

      Xcc_Z      = 4,  /* zero               */
      Xcc_NZ     = 5,  /* not zero           */

      Xcc_BE     = 6,  /* below or equal     */
      Xcc_NBE    = 7,  /* not below or equal */

      Xcc_S      = 8,  /* negative           */
      Xcc_NS     = 9,  /* not negative       */

      Xcc_P      = 10, /* parity even        */
      Xcc_NP     = 11, /* not parity even    */
      Xcc_L      = 12, /* less               */
      Xcc_NL     = 13, /* not less           */

      Xcc_LE     = 14, /* less or equal      */
      Xcc_NLE    = 15, /* not less or equal  */

      Xcc_ALWAYS = 16  /* the usual hack     */
   }
   X86CondCode;

extern const HChar* showX86CondCode ( X86CondCode );

/* --------- Memory address expressions (amodes). --------- */

typedef
   enum {
     Xam_IR,        /* Immediate + Reg */
     Xam_IRRS       /* Immediate + Reg1 + (Reg2 << Shift) */
   }
   X86AModeTag;

typedef
   struct {
      X86AModeTag tag;
      union {
         struct {
            UInt imm;
            HReg reg;
         } IR;
         struct {
            UInt imm;
            HReg base;
            HReg index;
            Int  shift; /* 0, 1, 2 or 3 only */
         } IRRS;
      } Xam;
   }
   X86AMode;

extern X86AMode* X86AMode_IR   ( UInt, HReg );
extern X86AMode* X86AMode_IRRS ( UInt, HReg, HReg, Int );

extern X86AMode* dopyX86AMode ( X86AMode* );

extern void ppX86AMode ( X86AMode* );
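
/* Illustrative sketch, not part of the interface: the amode 4(%ebp)
   would be built as

      X86AMode* am1 = X86AMode_IR(4, hregX86_EBP());

   and 1(%esi,%edi,4) -- base %esi, index %edi, scale 4 (shift 2) -- as

      X86AMode* am2 = X86AMode_IRRS(1, hregX86_ESI(), hregX86_EDI(), 2);
*/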


/* --------- Operand, which can be reg, immediate or memory. --------- */

typedef
   enum {
      Xrmi_Imm,
      Xrmi_Reg,
      Xrmi_Mem
   }
   X86RMITag;

typedef
   struct {
      X86RMITag tag;
      union {
         struct {
            UInt imm32;
         } Imm;
         struct {
            HReg reg;
         } Reg;
         struct {
            X86AMode* am;
         } Mem;
      }
      Xrmi;
   }
   X86RMI;

extern X86RMI* X86RMI_Imm ( UInt );
extern X86RMI* X86RMI_Reg ( HReg );
extern X86RMI* X86RMI_Mem ( X86AMode* );

extern void ppX86RMI ( X86RMI* );
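
/* Illustrative sketch, not part of the interface: the three X86RMI
   forms correspond to the three kinds of source operand a 32-bit
   reg-destination ALU insn can take, e.g.

      X86RMI* srcI = X86RMI_Imm(0x42);                           // $0x42
      X86RMI* srcR = X86RMI_Reg(hregX86_ECX());                  // %ecx
      X86RMI* srcM = X86RMI_Mem(X86AMode_IR(8, hregX86_EBP()));  // 8(%ebp)
*/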


/* --------- Operand, which can be reg or immediate only. --------- */

typedef
   enum {
      Xri_Imm,
      Xri_Reg
   }
   X86RITag;

typedef
   struct {
      X86RITag tag;
      union {
         struct {
            UInt imm32;
         } Imm;
         struct {
            HReg reg;
         } Reg;
      }
      Xri;
   }
   X86RI;

extern X86RI* X86RI_Imm ( UInt );
extern X86RI* X86RI_Reg ( HReg );

extern void ppX86RI ( X86RI* );
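
/* Illustrative sketch, not part of the interface: an X86RI is the
   reg-or-immediate source of a memory-destination ALU insn, e.g.
   storing a constant to the stack via Alu32M (declared below):

      X86Instr_Alu32M(Xalu_MOV, X86RI_Imm(0), X86AMode_IR(0, hregX86_ESP()));
*/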


/* --------- Operand, which can be reg or memory only. --------- */

typedef
   enum {
      Xrm_Reg,
      Xrm_Mem
   }
   X86RMTag;

typedef
   struct {
      X86RMTag tag;
      union {
         struct {
            HReg reg;
         } Reg;
         struct {
            X86AMode* am;
         } Mem;
      }
      Xrm;
   }
   X86RM;

extern X86RM* X86RM_Reg ( HReg );
extern X86RM* X86RM_Mem ( X86AMode* );

extern void ppX86RM ( X86RM* );
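
/* Illustrative sketch, not part of the interface: an X86RM names a
   reg-or-memory operand, as used for example by Test32 (declared
   below):

      X86Instr_Test32(0xFF, X86RM_Reg(hregX86_EDX()));   // testl $0xFF,%edx
      X86Instr_Test32(0xFF, X86RM_Mem(X86AMode_IR(0, hregX86_EAX())));
*/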


/* --------- Instructions. --------- */

/* --------- */
typedef
   enum {
      Xun_NEG,
      Xun_NOT
   }
   X86UnaryOp;

extern const HChar* showX86UnaryOp ( X86UnaryOp );


/* --------- */
typedef
   enum {
      Xalu_INVALID,
      Xalu_MOV,
      Xalu_CMP,
      Xalu_ADD, Xalu_SUB, Xalu_ADC, Xalu_SBB,
      Xalu_AND, Xalu_OR, Xalu_XOR,
      Xalu_MUL
   }
   X86AluOp;

extern const HChar* showX86AluOp ( X86AluOp );


/* --------- */
typedef
   enum {
      Xsh_INVALID,
      Xsh_SHL, Xsh_SHR, Xsh_SAR
   }
   X86ShiftOp;

extern const HChar* showX86ShiftOp ( X86ShiftOp );


/* --------- */
typedef
   enum {
      Xfp_INVALID,
      /* Binary */
      Xfp_ADD, Xfp_SUB, Xfp_MUL, Xfp_DIV,
      Xfp_SCALE, Xfp_ATAN, Xfp_YL2X, Xfp_YL2XP1, Xfp_PREM, Xfp_PREM1,
      /* Unary */
      Xfp_SQRT, Xfp_ABS, Xfp_NEG, Xfp_MOV, Xfp_SIN, Xfp_COS, Xfp_TAN,
      Xfp_ROUND, Xfp_2XM1
   }
   X86FpOp;

extern const HChar* showX86FpOp ( X86FpOp );


/* --------- */
typedef
   enum {
      Xsse_INVALID,
      /* mov */
      Xsse_MOV,
      /* Floating point binary */
      Xsse_ADDF, Xsse_SUBF, Xsse_MULF, Xsse_DIVF,
      Xsse_MAXF, Xsse_MINF,
      Xsse_CMPEQF, Xsse_CMPLTF, Xsse_CMPLEF, Xsse_CMPUNF,
      /* Floating point unary */
      Xsse_RCPF, Xsse_RSQRTF, Xsse_SQRTF,
      /* Bitwise */
      Xsse_AND, Xsse_OR, Xsse_XOR, Xsse_ANDN,
      /* Integer binary */
      Xsse_ADD8,   Xsse_ADD16,   Xsse_ADD32,   Xsse_ADD64,
      Xsse_QADD8U, Xsse_QADD16U,
      Xsse_QADD8S, Xsse_QADD16S,
      Xsse_SUB8,   Xsse_SUB16,   Xsse_SUB32,   Xsse_SUB64,
      Xsse_QSUB8U, Xsse_QSUB16U,
      Xsse_QSUB8S, Xsse_QSUB16S,
      Xsse_MUL16,
      Xsse_MULHI16U,
      Xsse_MULHI16S,
      Xsse_AVG8U, Xsse_AVG16U,
      Xsse_MAX16S,
      Xsse_MAX8U,
      Xsse_MIN16S,
      Xsse_MIN8U,
      Xsse_CMPEQ8,  Xsse_CMPEQ16,  Xsse_CMPEQ32,
      Xsse_CMPGT8S, Xsse_CMPGT16S, Xsse_CMPGT32S,
      Xsse_SHL16, Xsse_SHL32, Xsse_SHL64,
      Xsse_SHR16, Xsse_SHR32, Xsse_SHR64,
      Xsse_SAR16, Xsse_SAR32,
      Xsse_PACKSSD, Xsse_PACKSSW, Xsse_PACKUSW,
      Xsse_UNPCKHB, Xsse_UNPCKHW, Xsse_UNPCKHD, Xsse_UNPCKHQ,
      Xsse_UNPCKLB, Xsse_UNPCKLW, Xsse_UNPCKLD, Xsse_UNPCKLQ
   }
   X86SseOp;

extern const HChar* showX86SseOp ( X86SseOp );
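
/* Illustrative sketch, with an assumption about how the opcodes map to
   machine instructions: an X86SseOp is combined with one of the
   Xin_Sse* instruction forms declared below, so that, for a pair of
   vector HRegs src and dst,

      X86Instr_Sse32Fx4(Xsse_ADDF, src, dst);   // roughly: addps src, dst
      X86Instr_SseReRg(Xsse_ADD32, src, dst);   // roughly: paddd src, dst

   with the destination on the right, as elsewhere in this file. */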


/* --------- */
typedef
   enum {
      Xin_Alu32R,    /* 32-bit mov/arith/logical, dst=REG */
      Xin_Alu32M,    /* 32-bit mov/arith/logical, dst=MEM */
      Xin_Sh32,      /* 32-bit shift/rotate, dst=REG */
      Xin_Test32,    /* 32-bit test of REG or MEM against imm32 (AND, set
                        flags, discard result) */
      Xin_Unary32,   /* 32-bit not and neg */
      Xin_Lea32,     /* 32-bit compute EA into a reg */
      Xin_MulL,      /* 32 x 32 -> 64 multiply */
      Xin_Div,       /* 64/32 -> (32,32) div and mod */
      Xin_Sh3232,    /* shldl or shrdl */
      Xin_Push,      /* push (32-bit?) value on stack */
      Xin_Call,      /* call to address in register */
      Xin_XDirect,   /* direct transfer to GA */
      Xin_XIndir,    /* indirect transfer to GA */
      Xin_XAssisted, /* assisted transfer to GA */
      Xin_CMov32,    /* conditional move */
      Xin_LoadEX,    /* mov{s,z}{b,w}l from mem to reg */
      Xin_Store,     /* store 16/8 bit value in memory */
      Xin_Set32,     /* convert condition code to 32-bit value */
      Xin_Bsfr32,    /* 32-bit bsf/bsr */
      Xin_MFence,    /* mem fence (not just sse2, but sse0 and 1/mmxext too) */
      Xin_ACAS,      /* 8/16/32-bit lock;cmpxchg */
      Xin_DACAS,     /* lock;cmpxchg8b (doubleword ACAS, 2 x 32-bit only) */

      Xin_FpUnary,   /* FP fake unary op */
      Xin_FpBinary,  /* FP fake binary op */
      Xin_FpLdSt,    /* FP fake load/store */
      Xin_FpLdStI,   /* FP fake load/store, converting to/from Int */
      Xin_Fp64to32,  /* FP round IEEE754 double to IEEE754 single */
      Xin_FpCMov,    /* FP fake floating point conditional move */
      Xin_FpLdCW,    /* fldcw */
      Xin_FpStSW_AX, /* fstsw %ax */
      Xin_FpCmp,     /* FP compare, generating a C320 value into int reg */

      Xin_SseConst,  /* Generate restricted SSE literal */
      Xin_SseLdSt,   /* SSE load/store, no alignment constraints */
      Xin_SseLdzLO,  /* SSE load low 32/64 bits, zero remainder of reg */
      Xin_Sse32Fx4,  /* SSE binary, 32Fx4 */
      Xin_Sse32FLo,  /* SSE binary, 32F in lowest lane only */
      Xin_Sse64Fx2,  /* SSE binary, 64Fx2 */
      Xin_Sse64FLo,  /* SSE binary, 64F in lowest lane only */
      Xin_SseReRg,   /* SSE binary general reg-reg, Re, Rg */
      Xin_SseCMov,   /* SSE conditional move */
      Xin_SseShuf,   /* SSE2 shuffle (pshufd) */
      Xin_EvCheck,   /* Event check */
      Xin_ProfInc    /* 64-bit profile counter increment */
   }
   X86InstrTag;

/* Destinations are on the RIGHT (second operand) */
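
/* For example (illustrative): X86Instr_Alu32R(Xalu_SUB, src, dst)
   computes dst = dst - src, matching the AT&T-syntax reading
   "subl src, dst", where src is an X86RMI* and dst an integer HReg. */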

typedef
   struct {
      X86InstrTag tag;
      union {
         struct {
            X86AluOp op;
            X86RMI*  src;
            HReg     dst;
         } Alu32R;
         struct {
            X86AluOp  op;
            X86RI*    src;
            X86AMode* dst;
         } Alu32M;
         struct {
            X86ShiftOp op;
            UInt  src;  /* shift amount, or 0 means %cl */
            HReg  dst;
         } Sh32;
         struct {
            UInt   imm32;
            X86RM* dst; /* not written, only read */
         } Test32;
         /* Not and Neg */
         struct {
            X86UnaryOp op;
            HReg       dst;
         } Unary32;
         /* 32-bit compute EA into a reg */
         struct {
            X86AMode* am;
            HReg      dst;
         } Lea32;
         /* EDX:EAX = EAX *s/u r/m32 */
         struct {
            Bool   syned;
            X86RM* src;
         } MulL;
         /* x86 div/idiv instruction.  Modifies EDX and EAX and reads src. */
         struct {
            Bool   syned;
            X86RM* src;
         } Div;
         /* shld/shrd.  op may only be Xsh_SHL or Xsh_SHR */
         struct {
            X86ShiftOp op;
            UInt       amt;   /* shift amount, or 0 means %cl */
            HReg       src;
            HReg       dst;
         } Sh3232;
         struct {
            X86RMI* src;
         } Push;
         /* Pseudo-insn.  Call target (an absolute address), on given
            condition (which could be Xcc_ALWAYS). */
         struct {
            X86CondCode cond;
            Addr32      target;
            Int         regparms; /* 0 .. 3 */
            RetLoc      rloc;     /* where the return value will be */
         } Call;
         /* Update the guest EIP value, then exit requesting to chain
            to it.  May be conditional.  Urr, use of Addr32 implicitly
            assumes that wordsize(guest) == wordsize(host). */
         struct {
            Addr32      dstGA;    /* next guest address */
            X86AMode*   amEIP;    /* amode in guest state for EIP */
            X86CondCode cond;     /* can be Xcc_ALWAYS */
            Bool        toFastEP; /* chain to the slow or fast point? */
         } XDirect;
         /* Boring transfer to a guest address not known at JIT time.
            Not chainable.  May be conditional. */
         struct {
            HReg        dstGA;
            X86AMode*   amEIP;
            X86CondCode cond; /* can be Xcc_ALWAYS */
         } XIndir;
         /* Assisted transfer to a guest address, most general case.
            Not chainable.  May be conditional. */
         struct {
            HReg        dstGA;
            X86AMode*   amEIP;
            X86CondCode cond; /* can be Xcc_ALWAYS */
            IRJumpKind  jk;
         } XAssisted;
         /* Mov src to dst on the given condition, which may not
            be the bogus Xcc_ALWAYS. */
         struct {
            X86CondCode cond;
            X86RM*      src;
            HReg        dst;
         } CMov32;
         /* Sign/Zero extending loads.  Dst size is always 32 bits. */
         struct {
            UChar     szSmall;
            Bool      syned;
            X86AMode* src;
            HReg      dst;
         } LoadEX;
         /* 16/8 bit stores, which are troublesome (particularly
            8-bit) */
         struct {
            UChar     sz; /* only 1 or 2 */
            HReg      src;
            X86AMode* dst;
         } Store;
         /* Convert an x86 condition code to a 32-bit value (0 or 1). */
         struct {
            X86CondCode cond;
            HReg        dst;
         } Set32;
         /* 32-bit bsf or bsr. */
         struct {
            Bool isFwds;
            HReg src;
            HReg dst;
         } Bsfr32;
         /* Mem fence (not just sse2, but sse0 and sse1/mmxext too).
            In short, an insn which flushes all preceding loads and
            stores as much as possible before continuing.  On SSE2
            we emit a real "mfence", on SSE1 or the MMXEXT subset
            "sfence ; lock addl $0,0(%esp)" and on SSE0
            "lock addl $0,0(%esp)".  This insn therefore carries the
            host's hwcaps so the assembler knows what to emit. */
         struct {
            UInt hwcaps;
         } MFence;
         /* "lock;cmpxchg": mem address in .addr,
             expected value in %eax, new value in %ebx */
         struct {
            X86AMode* addr;
            UChar     sz; /* 1, 2 or 4 */
         } ACAS;
         /* "lock;cmpxchg8b": mem address in .addr, expected value in
            %edx:%eax, new value in %ecx:%ebx */
         struct {
            X86AMode* addr;
         } DACAS;

         /* X86 Floating point (fake 3-operand, "flat reg file" insns) */
         struct {
            X86FpOp op;
            HReg    src;
            HReg    dst;
         } FpUnary;
         struct {
            X86FpOp op;
            HReg    srcL;
            HReg    srcR;
            HReg    dst;
         } FpBinary;
         struct {
            Bool      isLoad;
            UChar     sz; /* only 4 (IEEE single) or 8 (IEEE double) */
            HReg      reg;
            X86AMode* addr;
         } FpLdSt;
         /* Move 64-bit float to/from memory, converting to/from
            signed int on the way.  Note the conversions will observe
            the host FPU rounding mode currently in force. */
         struct {
            Bool      isLoad;
            UChar     sz; /* only 2, 4 or 8 */
            HReg      reg;
            X86AMode* addr;
         } FpLdStI;
         /* By observing the current FPU rounding mode, round (etc)
            src into dst given that dst should be interpreted as an
            IEEE754 32-bit (float) type. */
         struct {
            HReg src;
            HReg dst;
         } Fp64to32;
         /* Mov src to dst on the given condition, which may not
            be the bogus Xcc_ALWAYS. */
         struct {
            X86CondCode cond;
            HReg        src;
            HReg        dst;
         } FpCMov;
         /* Load the FPU's 16-bit control word (fldcw) */
         struct {
            X86AMode* addr;
         }
         FpLdCW;
         /* fstsw %ax */
         struct {
            /* no fields */
         }
         FpStSW_AX;
         /* Do a compare, generating the C320 bits into the dst. */
         struct {
            HReg    srcL;
            HReg    srcR;
            HReg    dst;
         } FpCmp;

         /* Simplistic SSE[123] */
         struct {
            UShort  con;
            HReg    dst;
         } SseConst;
         struct {
            Bool      isLoad;
            HReg      reg;
            X86AMode* addr;
         } SseLdSt;
         struct {
            UChar     sz; /* 4 or 8 only */
            HReg      reg;
            X86AMode* addr;
         } SseLdzLO;
         struct {
            X86SseOp op;
            HReg     src;
            HReg     dst;
         } Sse32Fx4;
         struct {
            X86SseOp op;
            HReg     src;
            HReg     dst;
         } Sse32FLo;
         struct {
            X86SseOp op;
            HReg     src;
            HReg     dst;
         } Sse64Fx2;
         struct {
            X86SseOp op;
            HReg     src;
            HReg     dst;
         } Sse64FLo;
         struct {
            X86SseOp op;
            HReg     src;
            HReg     dst;
         } SseReRg;
         /* Mov src to dst on the given condition, which may not
            be the bogus Xcc_ALWAYS. */
         struct {
            X86CondCode cond;
            HReg        src;
            HReg        dst;
         } SseCMov;
         struct {
            Int    order; /* 0 <= order <= 0xFF */
            HReg   src;
            HReg   dst;
         } SseShuf;
         struct {
            X86AMode* amCounter;
            X86AMode* amFailAddr;
         } EvCheck;
         struct {
            /* No fields.  The address of the counter to inc is
               installed later, post-translation, by patching it in,
               as it is not known at translation time. */
         } ProfInc;

      } Xin;
   }
   X86Instr;

extern X86Instr* X86Instr_Alu32R    ( X86AluOp, X86RMI*, HReg );
extern X86Instr* X86Instr_Alu32M    ( X86AluOp, X86RI*,  X86AMode* );
extern X86Instr* X86Instr_Unary32   ( X86UnaryOp op, HReg dst );
extern X86Instr* X86Instr_Lea32     ( X86AMode* am, HReg dst );

extern X86Instr* X86Instr_Sh32      ( X86ShiftOp, UInt, HReg );
extern X86Instr* X86Instr_Test32    ( UInt imm32, X86RM* dst );
extern X86Instr* X86Instr_MulL      ( Bool syned, X86RM* );
extern X86Instr* X86Instr_Div       ( Bool syned, X86RM* );
extern X86Instr* X86Instr_Sh3232    ( X86ShiftOp, UInt amt, HReg src, HReg dst );
extern X86Instr* X86Instr_Push      ( X86RMI* );
extern X86Instr* X86Instr_Call      ( X86CondCode, Addr32, Int, RetLoc );
extern X86Instr* X86Instr_XDirect   ( Addr32 dstGA, X86AMode* amEIP,
                                      X86CondCode cond, Bool toFastEP );
extern X86Instr* X86Instr_XIndir    ( HReg dstGA, X86AMode* amEIP,
                                      X86CondCode cond );
extern X86Instr* X86Instr_XAssisted ( HReg dstGA, X86AMode* amEIP,
                                      X86CondCode cond, IRJumpKind jk );
extern X86Instr* X86Instr_CMov32    ( X86CondCode, X86RM* src, HReg dst );
extern X86Instr* X86Instr_LoadEX    ( UChar szSmall, Bool syned,
                                      X86AMode* src, HReg dst );
extern X86Instr* X86Instr_Store     ( UChar sz, HReg src, X86AMode* dst );
extern X86Instr* X86Instr_Set32     ( X86CondCode cond, HReg dst );
extern X86Instr* X86Instr_Bsfr32    ( Bool isFwds, HReg src, HReg dst );
extern X86Instr* X86Instr_MFence    ( UInt hwcaps );
extern X86Instr* X86Instr_ACAS      ( X86AMode* addr, UChar sz );
extern X86Instr* X86Instr_DACAS     ( X86AMode* addr );

extern X86Instr* X86Instr_FpUnary   ( X86FpOp op, HReg src, HReg dst );
extern X86Instr* X86Instr_FpBinary  ( X86FpOp op, HReg srcL, HReg srcR, HReg dst );
extern X86Instr* X86Instr_FpLdSt    ( Bool isLoad, UChar sz, HReg reg, X86AMode* );
extern X86Instr* X86Instr_FpLdStI   ( Bool isLoad, UChar sz, HReg reg, X86AMode* );
extern X86Instr* X86Instr_Fp64to32  ( HReg src, HReg dst );
extern X86Instr* X86Instr_FpCMov    ( X86CondCode, HReg src, HReg dst );
extern X86Instr* X86Instr_FpLdCW    ( X86AMode* );
extern X86Instr* X86Instr_FpStSW_AX ( void );
extern X86Instr* X86Instr_FpCmp     ( HReg srcL, HReg srcR, HReg dst );

extern X86Instr* X86Instr_SseConst  ( UShort con, HReg dst );
extern X86Instr* X86Instr_SseLdSt   ( Bool isLoad, HReg, X86AMode* );
extern X86Instr* X86Instr_SseLdzLO  ( Int sz, HReg, X86AMode* );
extern X86Instr* X86Instr_Sse32Fx4  ( X86SseOp, HReg, HReg );
extern X86Instr* X86Instr_Sse32FLo  ( X86SseOp, HReg, HReg );
extern X86Instr* X86Instr_Sse64Fx2  ( X86SseOp, HReg, HReg );
extern X86Instr* X86Instr_Sse64FLo  ( X86SseOp, HReg, HReg );
extern X86Instr* X86Instr_SseReRg   ( X86SseOp, HReg, HReg );
extern X86Instr* X86Instr_SseCMov   ( X86CondCode, HReg src, HReg dst );
extern X86Instr* X86Instr_SseShuf   ( Int order, HReg src, HReg dst );
extern X86Instr* X86Instr_EvCheck   ( X86AMode* amCounter,
                                      X86AMode* amFailAddr );
extern X86Instr* X86Instr_ProfInc   ( void );
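
/* Illustrative sketch, not part of the interface: a typical use during
   instruction selection, assuming 'dst' is an integer-class virtual
   HReg and 'addInstr'/'env' stand in for the selector's own
   accumulation mechanism (see host_x86_isel.c):

      // dst = dst + 4, i.e. addl $4, dst
      addInstr(env, X86Instr_Alu32R(Xalu_ADD, X86RMI_Imm(4), dst));
*/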


extern void ppX86Instr ( X86Instr*, Bool );

/* Some functions that insulate the register allocator from details
   of the underlying instruction set. */
extern void         getRegUsage_X86Instr ( HRegUsage*, X86Instr*, Bool );
extern void         mapRegs_X86Instr     ( HRegRemap*, X86Instr*, Bool );
extern Bool         isMove_X86Instr      ( X86Instr*, HReg*, HReg* );
extern Int          emit_X86Instr        ( /*MB_MOD*/Bool* is_profInc,
                                           UChar* buf, Int nbuf, X86Instr* i,
                                           Bool mode64,
                                           void* disp_cp_chain_me_to_slowEP,
                                           void* disp_cp_chain_me_to_fastEP,
                                           void* disp_cp_xindir,
                                           void* disp_cp_xassisted );

extern void genSpill_X86  ( /*OUT*/HInstr** i1, /*OUT*/HInstr** i2,
                            HReg rreg, Int offset, Bool );
extern void genReload_X86 ( /*OUT*/HInstr** i1, /*OUT*/HInstr** i2,
                            HReg rreg, Int offset, Bool );

extern X86Instr*    directReload_X86     ( X86Instr* i,
                                           HReg vreg, Short spill_off );
extern void         getAllocableRegs_X86 ( Int*, HReg** );
extern HInstrArray* iselSB_X86           ( IRSB*,
                                           VexArch,
                                           VexArchInfo*,
                                           VexAbiInfo*,
                                           Int offs_Host_EvC_Counter,
                                           Int offs_Host_EvC_FailAddr,
                                           Bool chainingAllowed,
                                           Bool addProfInc,
                                           Addr64 max_ga );

/* How big is an event check?  This is kind of a kludge because it
   depends on the offsets of host_EvC_FAILADDR and host_EvC_COUNTER,
   and so assumes that they are both <= 128, and so can use the short
   offset encoding.  This is all checked with assertions, so in the
   worst case we will merely assert at startup. */
extern Int evCheckSzB_X86 ( void );

/* Perform chaining and unchaining of an XDirect jump. */
extern VexInvalRange chainXDirect_X86 ( void* place_to_chain,
                                        void* disp_cp_chain_me_EXPECTED,
                                        void* place_to_jump_to );

extern VexInvalRange unchainXDirect_X86 ( void* place_to_unchain,
                                          void* place_to_jump_to_EXPECTED,
                                          void* disp_cp_chain_me );

/* Patch the counter location into an existing ProfInc point. */
extern VexInvalRange patchProfInc_X86 ( void*  place_to_patch,
                                        ULong* location_of_counter );


#endif /* ndef __VEX_HOST_X86_DEFS_H */

/*---------------------------------------------------------------*/
/*--- end                                     host_x86_defs.h ---*/
/*---------------------------------------------------------------*/