/*---------------------------------------------------------------*/
/*--- begin                                 host_amd64_defs.h ---*/
/*---------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2004-2015 OpenWorks LLP
      info@open-works.net

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
   02110-1301, USA.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/

#ifndef __VEX_HOST_AMD64_DEFS_H
#define __VEX_HOST_AMD64_DEFS_H

#include "libvex_basictypes.h"
#include "libvex.h"                      // VexArch
#include "host_generic_regs.h"           // HReg

/* --------- Registers. --------- */

/* The usual HReg abstraction.  There are 16 real int regs, 6 real
   float regs, and 16 real vector regs.
*/

#define ST_IN static inline
ST_IN HReg hregAMD64_RSI   ( void ) { return mkHReg(False, HRcInt64,   6,  0); }
ST_IN HReg hregAMD64_RDI   ( void ) { return mkHReg(False, HRcInt64,   7,  1); }
ST_IN HReg hregAMD64_R8    ( void ) { return mkHReg(False, HRcInt64,   8,  2); }
ST_IN HReg hregAMD64_R9    ( void ) { return mkHReg(False, HRcInt64,   9,  3); }
ST_IN HReg hregAMD64_R12   ( void ) { return mkHReg(False, HRcInt64,  12,  4); }
ST_IN HReg hregAMD64_R13   ( void ) { return mkHReg(False, HRcInt64,  13,  5); }
ST_IN HReg hregAMD64_R14   ( void ) { return mkHReg(False, HRcInt64,  14,  6); }
ST_IN HReg hregAMD64_R15   ( void ) { return mkHReg(False, HRcInt64,  15,  7); }
ST_IN HReg hregAMD64_RBX   ( void ) { return mkHReg(False, HRcInt64,   3,  8); }

ST_IN HReg hregAMD64_XMM3  ( void ) { return mkHReg(False, HRcVec128,  3,  9); }
ST_IN HReg hregAMD64_XMM4  ( void ) { return mkHReg(False, HRcVec128,  4, 10); }
ST_IN HReg hregAMD64_XMM5  ( void ) { return mkHReg(False, HRcVec128,  5, 11); }
ST_IN HReg hregAMD64_XMM6  ( void ) { return mkHReg(False, HRcVec128,  6, 12); }
ST_IN HReg hregAMD64_XMM7  ( void ) { return mkHReg(False, HRcVec128,  7, 13); }
ST_IN HReg hregAMD64_XMM8  ( void ) { return mkHReg(False, HRcVec128,  8, 14); }
ST_IN HReg hregAMD64_XMM9  ( void ) { return mkHReg(False, HRcVec128,  9, 15); }
ST_IN HReg hregAMD64_XMM10 ( void ) { return mkHReg(False, HRcVec128, 10, 16); }
ST_IN HReg hregAMD64_XMM11 ( void ) { return mkHReg(False, HRcVec128, 11, 17); }
ST_IN HReg hregAMD64_XMM12 ( void ) { return mkHReg(False, HRcVec128, 12, 18); }

ST_IN HReg hregAMD64_R10   ( void ) { return mkHReg(False, HRcInt64,  10, 19); }

ST_IN HReg hregAMD64_RAX   ( void ) { return mkHReg(False, HRcInt64,   0, 20); }
ST_IN HReg hregAMD64_RCX   ( void ) { return mkHReg(False, HRcInt64,   1, 21); }
ST_IN HReg hregAMD64_RDX   ( void ) { return mkHReg(False, HRcInt64,   2, 22); }
ST_IN HReg hregAMD64_RSP   ( void ) { return mkHReg(False, HRcInt64,   4, 23); }
ST_IN HReg hregAMD64_RBP   ( void ) { return mkHReg(False, HRcInt64,   5, 24); }
ST_IN HReg hregAMD64_R11   ( void ) { return mkHReg(False, HRcInt64,  11, 25); }

ST_IN HReg hregAMD64_XMM0  ( void ) { return mkHReg(False, HRcVec128,  0, 26); }
ST_IN HReg hregAMD64_XMM1  ( void ) { return mkHReg(False, HRcVec128,  1, 27); }
#undef ST_IN
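
/* Explanatory sketch, not part of the original header: going by the
   mkHReg declaration in host_generic_regs.h, the arguments above are
   (is-virtual, register class, hardware encoding, universe index).
   All of these are real registers, hence False.  For example,

      hregAMD64_RSI()  ==  mkHReg(False, HRcInt64, 6, 0)

   denotes the real 64-bit integer register %rsi (hardware encoding
   6), sitting at index 0 in the AMD64 register universe. */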

extern void ppHRegAMD64 ( HReg );


/* --------- Condition codes, AMD encoding. --------- */

typedef
   enum {
      Acc_O      = 0,  /* overflow           */
      Acc_NO     = 1,  /* no overflow        */

      Acc_B      = 2,  /* below              */
      Acc_NB     = 3,  /* not below          */

      Acc_Z      = 4,  /* zero               */
      Acc_NZ     = 5,  /* not zero           */

      Acc_BE     = 6,  /* below or equal     */
      Acc_NBE    = 7,  /* not below or equal */

      Acc_S      = 8,  /* negative           */
      Acc_NS     = 9,  /* not negative       */

      Acc_P      = 10, /* parity even        */
      Acc_NP     = 11, /* not parity even    */

      Acc_L      = 12, /* less               */
      Acc_NL     = 13, /* not less           */

      Acc_LE     = 14, /* less or equal      */
      Acc_NLE    = 15, /* not less or equal  */

      Acc_ALWAYS = 16  /* the usual hack     */
   }
   AMD64CondCode;

extern const HChar* showAMD64CondCode ( AMD64CondCode );
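
/* Observation (not from the original header): in this encoding each
   condition and its negation differ only in the bottom bit, so a
   condition can be inverted by XORing its code with 1 -- for
   example, Acc_Z ^ 1 == Acc_NZ.  Acc_ALWAYS is a VEX-only
   pseudo-condition with no hardware counterpart, and is handled
   specially wherever it is permitted. */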


/* --------- Memory address expressions (amodes). --------- */

typedef
   enum {
     Aam_IR,        /* Immediate + Reg */
     Aam_IRRS       /* Immediate + Reg1 + (Reg2 << Shift) */
   }
   AMD64AModeTag;

typedef
   struct {
      AMD64AModeTag tag;
      union {
         struct {
            UInt imm;
            HReg reg;
         } IR;
         struct {
            UInt imm;
            HReg base;
            HReg index;
            Int  shift; /* 0, 1, 2 or 3 only */
         } IRRS;
      } Aam;
   }
   AMD64AMode;

extern AMD64AMode* AMD64AMode_IR   ( UInt, HReg );
extern AMD64AMode* AMD64AMode_IRRS ( UInt, HReg, HReg, Int );

extern AMD64AMode* dopyAMD64AMode ( AMD64AMode* );

extern void ppAMD64AMode ( AMD64AMode* );
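
/* Illustrative sketch, using the register constructors above: the
   two amode forms correspond to the usual AT&T addressing syntaxes,
   with a shift value n denoting a scale of 1 << n.

      AMD64AMode_IR  (16, hregAMD64_RBP())            16(%rbp)
      AMD64AMode_IRRS(4, hregAMD64_RBX(),
                         hregAMD64_RCX(), 3)          4(%rbx,%rcx,8)
*/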


/* --------- Operand, which can be reg, immediate or memory. --------- */

typedef
   enum {
      Armi_Imm,
      Armi_Reg,
      Armi_Mem
   }
   AMD64RMITag;

typedef
   struct {
      AMD64RMITag tag;
      union {
         struct {
            UInt imm32;
         } Imm;
         struct {
            HReg reg;
         } Reg;
         struct {
            AMD64AMode* am;
         } Mem;
      }
      Armi;
   }
   AMD64RMI;

extern AMD64RMI* AMD64RMI_Imm ( UInt );
extern AMD64RMI* AMD64RMI_Reg ( HReg );
extern AMD64RMI* AMD64RMI_Mem ( AMD64AMode* );

extern void ppAMD64RMI      ( AMD64RMI* );
extern void ppAMD64RMI_lo32 ( AMD64RMI* );
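
/* Illustrative sketch: an RMI covers the three source-operand forms
   that most integer ALU instructions accept.

      AMD64RMI_Imm(42)                                  $42
      AMD64RMI_Reg(hregAMD64_RAX())                     %rax
      AMD64RMI_Mem(AMD64AMode_IR(0, hregAMD64_RSP()))   0(%rsp)

   AMD64RI and AMD64RM below are the obvious restrictions of this to
   reg-or-immediate and reg-or-memory respectively. */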


/* --------- Operand, which can be reg or immediate only. --------- */

typedef
   enum {
      Ari_Imm,
      Ari_Reg
   }
   AMD64RITag;

typedef
   struct {
      AMD64RITag tag;
      union {
         struct {
            UInt imm32;
         } Imm;
         struct {
            HReg reg;
         } Reg;
      }
      Ari;
   }
   AMD64RI;

extern AMD64RI* AMD64RI_Imm ( UInt );
extern AMD64RI* AMD64RI_Reg ( HReg );

extern void ppAMD64RI ( AMD64RI* );


/* --------- Operand, which can be reg or memory only. --------- */

typedef
   enum {
      Arm_Reg,
      Arm_Mem
   }
   AMD64RMTag;

typedef
   struct {
      AMD64RMTag tag;
      union {
         struct {
            HReg reg;
         } Reg;
         struct {
            AMD64AMode* am;
         } Mem;
      }
      Arm;
   }
   AMD64RM;

extern AMD64RM* AMD64RM_Reg ( HReg );
extern AMD64RM* AMD64RM_Mem ( AMD64AMode* );

extern void ppAMD64RM ( AMD64RM* );


/* --------- Instructions. --------- */

/* --------- */
typedef
   enum {
      Aun_NEG,
      Aun_NOT
   }
   AMD64UnaryOp;

extern const HChar* showAMD64UnaryOp ( AMD64UnaryOp );


/* --------- */
typedef
   enum {
      Aalu_INVALID,
      Aalu_MOV,
      Aalu_CMP,
      Aalu_ADD, Aalu_SUB, Aalu_ADC, Aalu_SBB,
      Aalu_AND, Aalu_OR, Aalu_XOR,
      Aalu_MUL
   }
   AMD64AluOp;

extern const HChar* showAMD64AluOp ( AMD64AluOp );


/* --------- */
typedef
   enum {
      Ash_INVALID,
      Ash_SHL, Ash_SHR, Ash_SAR
   }
   AMD64ShiftOp;

extern const HChar* showAMD64ShiftOp ( AMD64ShiftOp );


/* --------- */
typedef
   enum {
      Afp_INVALID,
      /* Binary */
      Afp_SCALE, Afp_ATAN, Afp_YL2X, Afp_YL2XP1, Afp_PREM, Afp_PREM1,
      /* Unary */
      Afp_SQRT,
      Afp_SIN, Afp_COS, Afp_TAN,
      Afp_ROUND, Afp_2XM1
   }
   A87FpOp;

extern const HChar* showA87FpOp ( A87FpOp );


/* --------- */
typedef
   enum {
      Asse_INVALID,
      /* mov */
      Asse_MOV,
      /* Floating point binary */
      Asse_ADDF, Asse_SUBF, Asse_MULF, Asse_DIVF,
      Asse_MAXF, Asse_MINF,
      Asse_CMPEQF, Asse_CMPLTF, Asse_CMPLEF, Asse_CMPUNF,
      /* Floating point unary */
      Asse_RCPF, Asse_RSQRTF, Asse_SQRTF,
      /* Bitwise */
      Asse_AND, Asse_OR, Asse_XOR, Asse_ANDN,
      Asse_ADD8, Asse_ADD16, Asse_ADD32, Asse_ADD64,
      Asse_QADD8U, Asse_QADD16U,
      Asse_QADD8S, Asse_QADD16S,
      Asse_SUB8, Asse_SUB16, Asse_SUB32, Asse_SUB64,
      Asse_QSUB8U, Asse_QSUB16U,
      Asse_QSUB8S, Asse_QSUB16S,
      Asse_MUL16,
      Asse_MULHI16U,
      Asse_MULHI16S,
      Asse_AVG8U, Asse_AVG16U,
      Asse_MAX16S,
      Asse_MAX8U,
      Asse_MIN16S,
      Asse_MIN8U,
      Asse_CMPEQ8, Asse_CMPEQ16, Asse_CMPEQ32,
      Asse_CMPGT8S, Asse_CMPGT16S, Asse_CMPGT32S,
      Asse_SHL16, Asse_SHL32, Asse_SHL64,
      Asse_SHR16, Asse_SHR32, Asse_SHR64,
      Asse_SAR16, Asse_SAR32,
      Asse_PACKSSD, Asse_PACKSSW, Asse_PACKUSW,
      Asse_UNPCKHB, Asse_UNPCKHW, Asse_UNPCKHD, Asse_UNPCKHQ,
      Asse_UNPCKLB, Asse_UNPCKLW, Asse_UNPCKLD, Asse_UNPCKLQ
   }
   AMD64SseOp;

extern const HChar* showAMD64SseOp ( AMD64SseOp );
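
/* Clarifying note, not in the original header: an AMD64SseOp only
   names the operation; the lane layout comes from the instruction
   shape it is used with (see Ain_Sse32Fx4 et al. below).  So
   Asse_ADDF would denote addps in an Ain_Sse32Fx4, addpd in an
   Ain_Sse64Fx2, and addss in an Ain_Sse32FLo. */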


/* --------- */
typedef
   enum {
      Ain_Imm64,       /* Generate 64-bit literal to register */
      Ain_Alu64R,      /* 64-bit mov/arith/logical, dst=REG */
      Ain_Alu64M,      /* 64-bit mov/arith/logical, dst=MEM */
      Ain_Sh64,        /* 64-bit shift/rotate, dst=REG or MEM */
      Ain_Test64,      /* 64-bit test (AND, set flags, discard result) */
      Ain_Unary64,     /* 64-bit not and neg */
      Ain_Lea64,       /* 64-bit compute EA into a reg */
      Ain_Alu32R,      /* 32-bit add/sub/and/or/xor/cmp, dst=REG (a la Alu64R) */
      Ain_MulL,        /* widening multiply */
      Ain_Div,         /* div and mod */
      Ain_Push,        /* push 64-bit value on stack */
      Ain_Call,        /* call to address in register */
      Ain_XDirect,     /* direct transfer to GA */
      Ain_XIndir,      /* indirect transfer to GA */
      Ain_XAssisted,   /* assisted transfer to GA */
      Ain_CMov64,      /* conditional move, 64-bit reg-reg only */
      Ain_CLoad,       /* cond. load to int reg, 32 bit ZX or 64 bit only */
      Ain_CStore,      /* cond. store from int reg, 32 or 64 bit only */
      Ain_MovxLQ,      /* reg-reg move, zx-ing/sx-ing top half */
      Ain_LoadEX,      /* mov{s,z}{b,w,l}q from mem to reg */
      Ain_Store,       /* store 32/16/8 bit value in memory */
      Ain_Set64,       /* convert condition code to 64-bit value */
      Ain_Bsfr64,      /* 64-bit bsf/bsr */
      Ain_MFence,      /* mem fence */
      Ain_ACAS,        /* 8/16/32/64-bit lock;cmpxchg */
      Ain_DACAS,       /* lock;cmpxchg8b/16b (doubleword ACAS, 2 x
                          32-bit or 2 x 64-bit only) */
      Ain_A87Free,     /* free up x87 registers */
      Ain_A87PushPop,  /* x87 loads/stores */
      Ain_A87FpOp,     /* x87 operations */
      Ain_A87LdCW,     /* load x87 control word */
      Ain_A87StSW,     /* store x87 status word */
      Ain_LdMXCSR,     /* load %mxcsr */
      Ain_SseUComIS,   /* ucomisd/ucomiss, then get %rflags into int
                          register */
      Ain_SseSI2SF,    /* scalar 32/64 int to 32/64 float conversion */
      Ain_SseSF2SI,    /* scalar 32/64 float to 32/64 int conversion */
      Ain_SseSDSS,     /* scalar float32 to/from float64 */
      Ain_SseLdSt,     /* SSE load/store 32/64/128 bits, no alignment
                          constraints, upper 96/64/0 bits arbitrary */
      Ain_SseCStore,   /* SSE conditional store, 128 bit only, any alignment */
      Ain_SseCLoad,    /* SSE conditional load, 128 bit only, any alignment */
      Ain_SseLdzLO,    /* SSE load low 32/64 bits, zero remainder of reg */
      Ain_Sse32Fx4,    /* SSE binary, 32Fx4 */
      Ain_Sse32FLo,    /* SSE binary, 32F in lowest lane only */
      Ain_Sse64Fx2,    /* SSE binary, 64Fx2 */
      Ain_Sse64FLo,    /* SSE binary, 64F in lowest lane only */
      Ain_SseReRg,     /* SSE binary general reg-reg, Re, Rg */
      Ain_SseCMov,     /* SSE conditional move */
      Ain_SseShuf,     /* SSE2 shuffle (pshufd) */
      //uu Ain_AvxLdSt,     /* AVX load/store 256 bits,
      //uu                     no alignment constraints */
      //uu Ain_AvxReRg,     /* AVX binary general reg-reg, Re, Rg */
      Ain_EvCheck,     /* Event check */
      Ain_ProfInc      /* 64-bit profile counter increment */
   }
   AMD64InstrTag;

/* Destinations are on the RIGHT (second operand) */
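
/* For example: an Ain_Alu64R with op Aalu_ADD, src AMD64RMI_Reg(r1)
   and dst r2 computes r2 = r2 + r1, that is, "addq %r1, %r2" in
   AT&T syntax -- the right (second) operand is both a source and
   the destination. */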

typedef
   struct {
      AMD64InstrTag tag;
      union {
         struct {
            ULong imm64;
            HReg  dst;
         } Imm64;
         struct {
            AMD64AluOp op;
            AMD64RMI*  src;
            HReg       dst;
         } Alu64R;
         struct {
            AMD64AluOp  op;
            AMD64RI*    src;
            AMD64AMode* dst;
         } Alu64M;
         struct {
            AMD64ShiftOp op;
            UInt         src;  /* shift amount, or 0 means %cl */
            HReg         dst;
         } Sh64;
         struct {
            UInt   imm32;
            HReg   dst;
         } Test64;
         /* Not and Neg */
         struct {
            AMD64UnaryOp op;
            HReg         dst;
         } Unary64;
         /* 64-bit compute EA into a reg */
         struct {
            AMD64AMode* am;
            HReg        dst;
         } Lea64;
         /* 32-bit add/sub/and/or/xor/cmp, dst=REG (a la Alu64R) */
         struct {
            AMD64AluOp op;
            AMD64RMI*  src;
            HReg       dst;
         } Alu32R;
         /* 64 x 64 -> 128 bit widening multiply: RDX:RAX = RAX *s/u
            r/m64 */
         struct {
            Bool     syned;
            AMD64RM* src;
         } MulL;
         /* amd64 div/idiv instruction.  Modifies RDX and RAX and
            reads src. */
         struct {
            Bool     syned;
            Int      sz; /* 4 or 8 only */
            AMD64RM* src;
         } Div;
         struct {
            AMD64RMI* src;
         } Push;
         /* Pseudo-insn.  Call target (an absolute address), on given
            condition (which could be Acc_ALWAYS). */
         struct {
            AMD64CondCode cond;
            Addr64        target;
            Int           regparms; /* 0 .. 6 */
            RetLoc        rloc;     /* where the return value will be */
         } Call;
         /* Update the guest RIP value, then exit requesting to chain
            to it.  May be conditional. */
         struct {
            Addr64        dstGA;    /* next guest address */
            AMD64AMode*   amRIP;    /* amode in guest state for RIP */
            AMD64CondCode cond;     /* can be Acc_ALWAYS */
            Bool          toFastEP; /* chain to the slow or fast point? */
         } XDirect;
         /* Boring transfer to a guest address not known at JIT time.
            Not chainable.  May be conditional. */
         struct {
            HReg          dstGA;
            AMD64AMode*   amRIP;
            AMD64CondCode cond; /* can be Acc_ALWAYS */
         } XIndir;
         /* Assisted transfer to a guest address, most general case.
            Not chainable.  May be conditional. */
         struct {
            HReg          dstGA;
            AMD64AMode*   amRIP;
            AMD64CondCode cond; /* can be Acc_ALWAYS */
            IRJumpKind    jk;
         } XAssisted;
         /* Mov src to dst on the given condition, which may not
            be the bogus Acc_ALWAYS. */
         struct {
            AMD64CondCode cond;
            HReg          src;
            HReg          dst;
         } CMov64;
         /* conditional load to int reg, 32 bit ZX or 64 bit only.
            cond may not be Acc_ALWAYS. */
         struct {
            AMD64CondCode cond;
            UChar         szB; /* 4 or 8 only */
            AMD64AMode*   addr;
            HReg          dst;
         } CLoad;
         /* cond. store from int reg, 32 or 64 bit only.
            cond may not be Acc_ALWAYS. */
         struct {
            AMD64CondCode cond;
            UChar         szB; /* 4 or 8 only */
            HReg          src;
            AMD64AMode*   addr;
         } CStore;
         /* reg-reg move, sx-ing/zx-ing top half */
         struct {
            Bool syned;
            HReg src;
            HReg dst;
         } MovxLQ;
         /* Sign/Zero extending loads.  Dst size is always 64 bits. */
         struct {
            UChar       szSmall; /* only 1, 2 or 4 */
            Bool        syned;
            AMD64AMode* src;
            HReg        dst;
         } LoadEX;
         /* 32/16/8 bit stores. */
         struct {
            UChar       sz; /* only 1, 2 or 4 */
            HReg        src;
            AMD64AMode* dst;
         } Store;
         /* Convert an amd64 condition code to a 64-bit value (0 or 1). */
         struct {
            AMD64CondCode cond;
            HReg          dst;
         } Set64;
         /* 64-bit bsf or bsr. */
         struct {
            Bool isFwds;
            HReg src;
            HReg dst;
         } Bsfr64;
         /* Mem fence.  In short, an insn which flushes all preceding
            loads and stores as much as possible before continuing.
            On AMD64 we emit a real "mfence". */
         struct {
         } MFence;
         struct {
            AMD64AMode* addr;
            UChar       sz; /* 1, 2, 4 or 8 */
         } ACAS;
         struct {
            AMD64AMode* addr;
            UChar       sz; /* 4 or 8 only */
         } DACAS;

         /* --- X87 --- */

         /* A very minimal set of x87 insns, that operate exactly in a
            stack-like way so no need to think about x87 registers. */

         /* Do 'ffree' on %st(7) .. %st(7-nregs) */
         struct {
            Int nregs; /* 1 <= nregs <= 7 */
         } A87Free;

         /* Push a 32- or 64-bit FP value from memory onto the stack,
            or move a value from the stack to memory and remove it
            from the stack. */
         struct {
            AMD64AMode* addr;
            Bool        isPush;
            UChar       szB; /* 4 or 8 */
         } A87PushPop;

         /* Do an operation on the top-of-stack.  This can be unary, in
            which case it is %st0 = OP( %st0 ), or binary: %st0 = OP(
            %st0, %st1 ). */
         struct {
            A87FpOp op;
         } A87FpOp;

         /* Load the FPU control word. */
         struct {
            AMD64AMode* addr;
         } A87LdCW;

         /* Store the FPU status word (fstsw m16) */
         struct {
            AMD64AMode* addr;
         } A87StSW;

         /* --- SSE --- */

         /* Load 32 bits into %mxcsr. */
         struct {
            AMD64AMode* addr;
         }
         LdMXCSR;
         /* ucomisd/ucomiss, then get %rflags into int register */
         struct {
            UChar   sz;   /* 4 or 8 only */
            HReg    srcL; /* xmm */
            HReg    srcR; /* xmm */
            HReg    dst;  /* int */
         } SseUComIS;
         /* scalar 32/64 int to 32/64 float conversion */
         struct {
            UChar szS; /* 4 or 8 */
            UChar szD; /* 4 or 8 */
            HReg  src; /* i class */
            HReg  dst; /* v class */
         } SseSI2SF;
         /* scalar 32/64 float to 32/64 int conversion */
         struct {
            UChar szS; /* 4 or 8 */
            UChar szD; /* 4 or 8 */
            HReg  src; /* v class */
            HReg  dst; /* i class */
         } SseSF2SI;
         /* scalar float32 to/from float64 */
         struct {
            Bool from64; /* True: 64->32; False: 32->64 */
            HReg src;
            HReg dst;
         } SseSDSS;
         struct {
            Bool        isLoad;
            UChar       sz; /* 4, 8 or 16 only */
            HReg        reg;
            AMD64AMode* addr;
         } SseLdSt;
         struct {
            AMD64CondCode cond; /* may not be Acc_ALWAYS */
            HReg          src;
            AMD64AMode*   addr;
         } SseCStore;
         struct {
            AMD64CondCode cond; /* may not be Acc_ALWAYS */
            AMD64AMode*   addr;
            HReg          dst;
         } SseCLoad;
         struct {
            Int         sz; /* 4 or 8 only */
            HReg        reg;
            AMD64AMode* addr;
         } SseLdzLO;
         struct {
            AMD64SseOp op;
            HReg       src;
            HReg       dst;
         } Sse32Fx4;
         struct {
            AMD64SseOp op;
            HReg       src;
            HReg       dst;
         } Sse32FLo;
         struct {
            AMD64SseOp op;
            HReg       src;
            HReg       dst;
         } Sse64Fx2;
         struct {
            AMD64SseOp op;
            HReg       src;
            HReg       dst;
         } Sse64FLo;
         struct {
            AMD64SseOp op;
            HReg       src;
            HReg       dst;
         } SseReRg;
         /* Mov src to dst on the given condition, which may not
            be the bogus Acc_ALWAYS. */
         struct {
            AMD64CondCode cond;
            HReg          src;
            HReg          dst;
         } SseCMov;
         struct {
            Int    order; /* 0 <= order <= 0xFF */
            HReg   src;
            HReg   dst;
         } SseShuf;
         //uu struct {
         //uu    Bool        isLoad;
         //uu    HReg        reg;
         //uu    AMD64AMode* addr;
         //uu } AvxLdSt;
         //uu struct {
         //uu    AMD64SseOp op;
         //uu    HReg       src;
         //uu    HReg       dst;
         //uu } AvxReRg;
         struct {
            AMD64AMode* amCounter;
            AMD64AMode* amFailAddr;
         } EvCheck;
         struct {
            /* No fields.  The address of the counter to inc is
               installed later, post-translation, by patching it in,
               as it is not known at translation time. */
         } ProfInc;

      } Ain;
   }
   AMD64Instr;

extern AMD64Instr* AMD64Instr_Imm64      ( ULong imm64, HReg dst );
extern AMD64Instr* AMD64Instr_Alu64R     ( AMD64AluOp, AMD64RMI*, HReg );
extern AMD64Instr* AMD64Instr_Alu64M     ( AMD64AluOp, AMD64RI*,  AMD64AMode* );
extern AMD64Instr* AMD64Instr_Unary64    ( AMD64UnaryOp op, HReg dst );
extern AMD64Instr* AMD64Instr_Lea64      ( AMD64AMode* am, HReg dst );
extern AMD64Instr* AMD64Instr_Alu32R     ( AMD64AluOp, AMD64RMI*, HReg );
extern AMD64Instr* AMD64Instr_Sh64       ( AMD64ShiftOp, UInt, HReg );
extern AMD64Instr* AMD64Instr_Test64     ( UInt imm32, HReg dst );
extern AMD64Instr* AMD64Instr_MulL       ( Bool syned, AMD64RM* );
extern AMD64Instr* AMD64Instr_Div        ( Bool syned, Int sz, AMD64RM* );
extern AMD64Instr* AMD64Instr_Push       ( AMD64RMI* );
extern AMD64Instr* AMD64Instr_Call       ( AMD64CondCode, Addr64, Int, RetLoc );
extern AMD64Instr* AMD64Instr_XDirect    ( Addr64 dstGA, AMD64AMode* amRIP,
                                           AMD64CondCode cond, Bool toFastEP );
extern AMD64Instr* AMD64Instr_XIndir     ( HReg dstGA, AMD64AMode* amRIP,
                                           AMD64CondCode cond );
extern AMD64Instr* AMD64Instr_XAssisted  ( HReg dstGA, AMD64AMode* amRIP,
                                           AMD64CondCode cond, IRJumpKind jk );
extern AMD64Instr* AMD64Instr_CMov64     ( AMD64CondCode, HReg src, HReg dst );
extern AMD64Instr* AMD64Instr_CLoad      ( AMD64CondCode cond, UChar szB,
                                           AMD64AMode* addr, HReg dst );
extern AMD64Instr* AMD64Instr_CStore     ( AMD64CondCode cond, UChar szB,
                                           HReg src, AMD64AMode* addr );
extern AMD64Instr* AMD64Instr_MovxLQ     ( Bool syned, HReg src, HReg dst );
extern AMD64Instr* AMD64Instr_LoadEX     ( UChar szSmall, Bool syned,
                                           AMD64AMode* src, HReg dst );
extern AMD64Instr* AMD64Instr_Store      ( UChar sz, HReg src, AMD64AMode* dst );
extern AMD64Instr* AMD64Instr_Set64      ( AMD64CondCode cond, HReg dst );
extern AMD64Instr* AMD64Instr_Bsfr64     ( Bool isFwds, HReg src, HReg dst );
extern AMD64Instr* AMD64Instr_MFence     ( void );
extern AMD64Instr* AMD64Instr_ACAS       ( AMD64AMode* addr, UChar sz );
extern AMD64Instr* AMD64Instr_DACAS      ( AMD64AMode* addr, UChar sz );

extern AMD64Instr* AMD64Instr_A87Free    ( Int nregs );
extern AMD64Instr* AMD64Instr_A87PushPop ( AMD64AMode* addr, Bool isPush, UChar szB );
extern AMD64Instr* AMD64Instr_A87FpOp    ( A87FpOp op );
extern AMD64Instr* AMD64Instr_A87LdCW    ( AMD64AMode* addr );
extern AMD64Instr* AMD64Instr_A87StSW    ( AMD64AMode* addr );
extern AMD64Instr* AMD64Instr_LdMXCSR    ( AMD64AMode* );
extern AMD64Instr* AMD64Instr_SseUComIS  ( Int sz, HReg srcL, HReg srcR, HReg dst );
extern AMD64Instr* AMD64Instr_SseSI2SF   ( Int szS, Int szD, HReg src, HReg dst );
extern AMD64Instr* AMD64Instr_SseSF2SI   ( Int szS, Int szD, HReg src, HReg dst );
extern AMD64Instr* AMD64Instr_SseSDSS    ( Bool from64, HReg src, HReg dst );
extern AMD64Instr* AMD64Instr_SseLdSt    ( Bool isLoad, Int sz, HReg, AMD64AMode* );
extern AMD64Instr* AMD64Instr_SseCStore  ( AMD64CondCode, HReg, AMD64AMode* );
extern AMD64Instr* AMD64Instr_SseCLoad   ( AMD64CondCode, AMD64AMode*, HReg );
extern AMD64Instr* AMD64Instr_SseLdzLO   ( Int sz, HReg, AMD64AMode* );
extern AMD64Instr* AMD64Instr_Sse32Fx4   ( AMD64SseOp, HReg, HReg );
extern AMD64Instr* AMD64Instr_Sse32FLo   ( AMD64SseOp, HReg, HReg );
extern AMD64Instr* AMD64Instr_Sse64Fx2   ( AMD64SseOp, HReg, HReg );
extern AMD64Instr* AMD64Instr_Sse64FLo   ( AMD64SseOp, HReg, HReg );
extern AMD64Instr* AMD64Instr_SseReRg    ( AMD64SseOp, HReg, HReg );
extern AMD64Instr* AMD64Instr_SseCMov    ( AMD64CondCode, HReg src, HReg dst );
extern AMD64Instr* AMD64Instr_SseShuf    ( Int order, HReg src, HReg dst );
//uu extern AMD64Instr* AMD64Instr_AvxLdSt    ( Bool isLoad, HReg, AMD64AMode* );
//uu extern AMD64Instr* AMD64Instr_AvxReRg    ( AMD64SseOp, HReg, HReg );
extern AMD64Instr* AMD64Instr_EvCheck    ( AMD64AMode* amCounter,
                                           AMD64AMode* amFailAddr );
extern AMD64Instr* AMD64Instr_ProfInc    ( void );
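
/* Hypothetical usage sketch, not part of this API: instruction
   selection (host_amd64_isel.c) typically allocates a virtual
   register and appends instructions to the block under construction,
   along these lines (env, newVRegI and addInstr are isel-side
   helpers, shown here only for illustration):

      HReg dst = newVRegI(env);
      addInstr(env, AMD64Instr_Imm64(0x2AULL, dst));
      addInstr(env, AMD64Instr_Alu64R(Aalu_ADD, AMD64RMI_Imm(1), dst));
*/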


extern void ppAMD64Instr ( const AMD64Instr*, Bool );

/* Some functions that insulate the register allocator from details
   of the underlying instruction set. */
extern void getRegUsage_AMD64Instr ( HRegUsage*, const AMD64Instr*, Bool );
extern void mapRegs_AMD64Instr     ( HRegRemap*, AMD64Instr*, Bool );
extern Bool isMove_AMD64Instr      ( const AMD64Instr*, HReg*, HReg* );
extern Int          emit_AMD64Instr   ( /*MB_MOD*/Bool* is_profInc,
                                        UChar* buf, Int nbuf,
                                        const AMD64Instr* i,
                                        Bool mode64,
                                        VexEndness endness_host,
                                        const void* disp_cp_chain_me_to_slowEP,
                                        const void* disp_cp_chain_me_to_fastEP,
                                        const void* disp_cp_xindir,
                                        const void* disp_cp_xassisted );

extern void genSpill_AMD64  ( /*OUT*/HInstr** i1, /*OUT*/HInstr** i2,
                              HReg rreg, Int offset, Bool );
extern void genReload_AMD64 ( /*OUT*/HInstr** i1, /*OUT*/HInstr** i2,
                              HReg rreg, Int offset, Bool );
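
/* Note (an assumption based on the corresponding .c file): the spill
   and reload generators produce one or two instructions that move
   rreg to or from the spill slot at the given offset from the guest
   state pointer, which on AMD64 is kept in %rbp. */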

extern const RRegUniverse* getRRegUniverse_AMD64 ( void );

extern HInstrArray* iselSB_AMD64           ( const IRSB*,
                                             VexArch,
                                             const VexArchInfo*,
                                             const VexAbiInfo*,
                                             Int offs_Host_EvC_Counter,
                                             Int offs_Host_EvC_FailAddr,
                                             Bool chainingAllowed,
                                             Bool addProfInc,
                                             Addr max_ga );

/* How big is an event check?  This is kind of a kludge because it
   depends on the offsets of host_EvC_FAILADDR and host_EvC_COUNTER,
   and so assumes that they are both <= 128, and so can use the short
   offset encoding.  This is all checked with assertions, so in the
   worst case we will merely assert at startup. */
extern Int evCheckSzB_AMD64 (void);

/* Perform a chaining and unchaining of an XDirect jump. */
extern VexInvalRange chainXDirect_AMD64 ( VexEndness endness_host,
                                          void* place_to_chain,
                                          const void* disp_cp_chain_me_EXPECTED,
                                          const void* place_to_jump_to );

extern VexInvalRange unchainXDirect_AMD64 ( VexEndness endness_host,
                                            void* place_to_unchain,
                                            const void* place_to_jump_to_EXPECTED,
                                            const void* disp_cp_chain_me );
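
/* In outline: an XDirect jump is initially emitted as a call to
   disp_cp_chain_me.  chainXDirect_AMD64 later overwrites that call
   site with a direct jump to the translated target, and
   unchainXDirect_AMD64 reverses the edit.  The returned
   VexInvalRange describes the modified code bytes, so the caller can
   invalidate them in the instruction cache if the host requires it. */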

/* Patch the counter location into an existing ProfInc point. */
extern VexInvalRange patchProfInc_AMD64 ( VexEndness endness_host,
                                          void*  place_to_patch,
                                          const ULong* location_of_counter );


#endif /* ndef __VEX_HOST_AMD64_DEFS_H */

/*---------------------------------------------------------------*/
/*--- end                                   host_amd64_defs.h ---*/
/*---------------------------------------------------------------*/