
/*---------------------------------------------------------------*/
/*--- begin                                 host_amd64_defs.h ---*/
/*---------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2004-2012 OpenWorks LLP
      info@open-works.net

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
   02110-1301, USA.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/

#ifndef __VEX_HOST_AMD64_DEFS_H
#define __VEX_HOST_AMD64_DEFS_H


/* --------- Registers. --------- */

/* The usual HReg abstraction.  There are 16 real int regs, 6 real
   float regs, and 16 real vector regs.
*/

extern void ppHRegAMD64 ( HReg );

extern HReg hregAMD64_RAX ( void );
extern HReg hregAMD64_RBX ( void );
extern HReg hregAMD64_RCX ( void );
extern HReg hregAMD64_RDX ( void );
extern HReg hregAMD64_RSP ( void );
extern HReg hregAMD64_RBP ( void );
extern HReg hregAMD64_RSI ( void );
extern HReg hregAMD64_RDI ( void );
extern HReg hregAMD64_R8  ( void );
extern HReg hregAMD64_R9  ( void );
extern HReg hregAMD64_R10 ( void );
extern HReg hregAMD64_R11 ( void );
extern HReg hregAMD64_R12 ( void );
extern HReg hregAMD64_R13 ( void );
extern HReg hregAMD64_R14 ( void );
extern HReg hregAMD64_R15 ( void );

extern HReg hregAMD64_FAKE0 ( void );
extern HReg hregAMD64_FAKE1 ( void );
extern HReg hregAMD64_FAKE2 ( void );
extern HReg hregAMD64_FAKE3 ( void );
extern HReg hregAMD64_FAKE4 ( void );
extern HReg hregAMD64_FAKE5 ( void );

extern HReg hregAMD64_XMM0  ( void );
extern HReg hregAMD64_XMM1  ( void );
extern HReg hregAMD64_XMM3  ( void );
extern HReg hregAMD64_XMM4  ( void );
extern HReg hregAMD64_XMM5  ( void );
extern HReg hregAMD64_XMM6  ( void );
extern HReg hregAMD64_XMM7  ( void );
extern HReg hregAMD64_XMM8  ( void );
extern HReg hregAMD64_XMM9  ( void );
extern HReg hregAMD64_XMM10 ( void );
extern HReg hregAMD64_XMM11 ( void );
extern HReg hregAMD64_XMM12 ( void );


/* --------- Condition codes, AMD encoding. --------- */

typedef
   enum {
      Acc_O      = 0,  /* overflow           */
      Acc_NO     = 1,  /* no overflow        */

      Acc_B      = 2,  /* below              */
      Acc_NB     = 3,  /* not below          */

      Acc_Z      = 4,  /* zero               */
      Acc_NZ     = 5,  /* not zero           */

      Acc_BE     = 6,  /* below or equal     */
      Acc_NBE    = 7,  /* not below or equal */

      Acc_S      = 8,  /* negative           */
      Acc_NS     = 9,  /* not negative       */

      Acc_P      = 10, /* parity even        */
      Acc_NP     = 11, /* not parity even    */

      Acc_L      = 12, /* less               */
      Acc_NL     = 13, /* not less           */

      Acc_LE     = 14, /* less or equal      */
      Acc_NLE    = 15, /* not less or equal  */

      Acc_ALWAYS = 16  /* the usual hack     */
   }
   AMD64CondCode;

extern HChar* showAMD64CondCode ( AMD64CondCode );

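/* Example sketch (illustrative only, not part of this interface):
   condition codes are consumed by the conditional instructions
   declared further down.  To materialise a signed "less than"
   compare of %rax against %rbx as 0 or 1 in some allocator-chosen
   register 'dst' (an assumed HReg), one might generate:

      // cmpq %rbx, %rax -- set flags from %rax - %rbx
      AMD64Instr_Alu64R( Aalu_CMP, AMD64RMI_Reg(hregAMD64_RBX()),
                         hregAMD64_RAX() );
      // dst = (%rax <s %rbx) ? 1 : 0
      AMD64Instr_Set64( Acc_L, dst );

   Acc_ALWAYS has no machine encoding; it exists so callers can
   request an unconditional variant of a conditional operation. */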

/* --------- Memory address expressions (amodes). --------- */

typedef
   enum {
     Aam_IR,        /* Immediate + Reg */
     Aam_IRRS       /* Immediate + Reg1 + (Reg2 << Shift) */
   }
   AMD64AModeTag;

typedef
   struct {
      AMD64AModeTag tag;
      union {
         struct {
            UInt imm;
            HReg reg;
         } IR;
         struct {
            UInt imm;
            HReg base;
            HReg index;
            Int  shift; /* 0, 1, 2 or 3 only */
         } IRRS;
      } Aam;
   }
   AMD64AMode;

extern AMD64AMode* AMD64AMode_IR   ( UInt, HReg );
extern AMD64AMode* AMD64AMode_IRRS ( UInt, HReg, HReg, Int );

extern AMD64AMode* dopyAMD64AMode ( AMD64AMode* );

extern void ppAMD64AMode ( AMD64AMode* );

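/* Example sketch (illustrative only): the two forms correspond
   directly to x86-64 addressing modes:

      // 16(%rbp)
      AMD64AMode* am1 = AMD64AMode_IR( 16, hregAMD64_RBP() );
      // 4(%rsi,%rdi,8) -- a shift of 3 scales the index by 8
      AMD64AMode* am2 = AMD64AMode_IRRS( 4, hregAMD64_RSI(),
                                         hregAMD64_RDI(), 3 );
*/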

/* --------- Operand, which can be reg, immediate or memory. --------- */

typedef
   enum {
      Armi_Imm,
      Armi_Reg,
      Armi_Mem
   }
   AMD64RMITag;

typedef
   struct {
      AMD64RMITag tag;
      union {
         struct {
            UInt imm32;
         } Imm;
         struct {
            HReg reg;
         } Reg;
         struct {
            AMD64AMode* am;
         } Mem;
      }
      Armi;
   }
   AMD64RMI;

extern AMD64RMI* AMD64RMI_Imm ( UInt );
extern AMD64RMI* AMD64RMI_Reg ( HReg );
extern AMD64RMI* AMD64RMI_Mem ( AMD64AMode* );

extern void ppAMD64RMI      ( AMD64RMI* );
extern void ppAMD64RMI_lo32 ( AMD64RMI* );

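/* Example sketch (illustrative only): an RMI defers the
   register-vs-immediate-vs-memory decision to a single operand
   value, so one instruction constructor covers all three cases:

      AMD64RMI_Imm( 42 );                                  // $42
      AMD64RMI_Reg( hregAMD64_RBX() );                     // %rbx
      AMD64RMI_Mem( AMD64AMode_IR(0, hregAMD64_RDI()) );   // 0(%rdi)
*/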

/* --------- Operand, which can be reg or immediate only. --------- */

typedef
   enum {
      Ari_Imm,
      Ari_Reg
   }
   AMD64RITag;

typedef
   struct {
      AMD64RITag tag;
      union {
         struct {
            UInt imm32;
         } Imm;
         struct {
            HReg reg;
         } Reg;
      }
      Ari;
   }
   AMD64RI;

extern AMD64RI* AMD64RI_Imm ( UInt );
extern AMD64RI* AMD64RI_Reg ( HReg );

extern void ppAMD64RI ( AMD64RI* );


/* --------- Operand, which can be reg or memory only. --------- */

typedef
   enum {
      Arm_Reg,
      Arm_Mem
   }
   AMD64RMTag;

typedef
   struct {
      AMD64RMTag tag;
      union {
         struct {
            HReg reg;
         } Reg;
         struct {
            AMD64AMode* am;
         } Mem;
      }
      Arm;
   }
   AMD64RM;

extern AMD64RM* AMD64RM_Reg ( HReg );
extern AMD64RM* AMD64RM_Mem ( AMD64AMode* );

extern void ppAMD64RM ( AMD64RM* );

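/* Example sketch (illustrative only): an RM is used where an operand
   may be a register or memory but not an immediate, e.g. the source
   of the widening multiply (Ain_MulL below):

      AMD64Instr_MulL( True/*signed*/, AMD64RM_Reg(hregAMD64_RBX()) );
*/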

/* --------- Instructions. --------- */

/* --------- */
typedef
   enum {
      Aun_NEG,
      Aun_NOT
   }
   AMD64UnaryOp;

extern HChar* showAMD64UnaryOp ( AMD64UnaryOp );


/* --------- */
typedef
   enum {
      Aalu_INVALID,
      Aalu_MOV,
      Aalu_CMP,
      Aalu_ADD, Aalu_SUB, Aalu_ADC, Aalu_SBB,
      Aalu_AND, Aalu_OR, Aalu_XOR,
      Aalu_MUL
   }
   AMD64AluOp;

extern HChar* showAMD64AluOp ( AMD64AluOp );


/* --------- */
typedef
   enum {
      Ash_INVALID,
      Ash_SHL, Ash_SHR, Ash_SAR
   }
   AMD64ShiftOp;

extern HChar* showAMD64ShiftOp ( AMD64ShiftOp );


/* --------- */
typedef
   enum {
      Afp_INVALID,
      /* Binary */
      Afp_SCALE, Afp_ATAN, Afp_YL2X, Afp_YL2XP1, Afp_PREM, Afp_PREM1,
      /* Unary */
      Afp_SQRT,
      Afp_SIN, Afp_COS, Afp_TAN,
      Afp_ROUND, Afp_2XM1
   }
   A87FpOp;

extern HChar* showA87FpOp ( A87FpOp );


/* --------- */
typedef
   enum {
      Asse_INVALID,
      /* mov */
      Asse_MOV,
      /* Floating point binary */
      Asse_ADDF, Asse_SUBF, Asse_MULF, Asse_DIVF,
      Asse_MAXF, Asse_MINF,
      Asse_CMPEQF, Asse_CMPLTF, Asse_CMPLEF, Asse_CMPUNF,
      /* Floating point unary */
      Asse_RCPF, Asse_RSQRTF, Asse_SQRTF,
      /* Bitwise */
      Asse_AND, Asse_OR, Asse_XOR, Asse_ANDN,
      Asse_ADD8, Asse_ADD16, Asse_ADD32, Asse_ADD64,
      Asse_QADD8U, Asse_QADD16U,
      Asse_QADD8S, Asse_QADD16S,
      Asse_SUB8, Asse_SUB16, Asse_SUB32, Asse_SUB64,
      Asse_QSUB8U, Asse_QSUB16U,
      Asse_QSUB8S, Asse_QSUB16S,
      Asse_MUL16,
      Asse_MULHI16U,
      Asse_MULHI16S,
      Asse_AVG8U, Asse_AVG16U,
      Asse_MAX16S,
      Asse_MAX8U,
      Asse_MIN16S,
      Asse_MIN8U,
      Asse_CMPEQ8, Asse_CMPEQ16, Asse_CMPEQ32,
      Asse_CMPGT8S, Asse_CMPGT16S, Asse_CMPGT32S,
      Asse_SHL16, Asse_SHL32, Asse_SHL64,
      Asse_SHR16, Asse_SHR32, Asse_SHR64,
      Asse_SAR16, Asse_SAR32,
      Asse_PACKSSD, Asse_PACKSSW, Asse_PACKUSW,
      Asse_UNPCKHB, Asse_UNPCKHW, Asse_UNPCKHD, Asse_UNPCKHQ,
      Asse_UNPCKLB, Asse_UNPCKLW, Asse_UNPCKLD, Asse_UNPCKLQ
   }
   AMD64SseOp;

extern HChar* showAMD64SseOp ( AMD64SseOp );


/* --------- */
typedef
   enum {
      Ain_Imm64,       /* Generate 64-bit literal to register */
      Ain_Alu64R,      /* 64-bit mov/arith/logical, dst=REG */
      Ain_Alu64M,      /* 64-bit mov/arith/logical, dst=MEM */
      Ain_Sh64,        /* 64-bit shift/rotate, dst=REG or MEM */
      Ain_Test64,      /* 64-bit test (AND, set flags, discard result) */
      Ain_Unary64,     /* 64-bit not and neg */
      Ain_Lea64,       /* 64-bit compute EA into a reg */
      Ain_Alu32R,      /* 32-bit add/sub/and/or/xor/cmp, dst=REG (a la Alu64R) */
      Ain_MulL,        /* widening multiply */
      Ain_Div,         /* div and mod */
      Ain_Push,        /* push 64-bit value on stack */
      Ain_Call,        /* call to address in register */
      Ain_XDirect,     /* direct transfer to GA */
      Ain_XIndir,      /* indirect transfer to GA */
      Ain_XAssisted,   /* assisted transfer to GA */
      Ain_CMov64,      /* conditional move */
      Ain_MovxLQ,      /* reg-reg move, zx-ing/sx-ing top half */
      Ain_LoadEX,      /* mov{s,z}{b,w,l}q from mem to reg */
      Ain_Store,       /* store 32/16/8 bit value in memory */
      Ain_Set64,       /* convert condition code to 64-bit value */
      Ain_Bsfr64,      /* 64-bit bsf/bsr */
      Ain_MFence,      /* mem fence */
      Ain_ACAS,        /* 8/16/32/64-bit lock;cmpxchg */
      Ain_DACAS,       /* lock;cmpxchg8b/16b (doubleword ACAS, 2 x
                          32-bit or 2 x 64-bit only) */
      Ain_A87Free,     /* free up x87 registers */
      Ain_A87PushPop,  /* x87 loads/stores */
      Ain_A87FpOp,     /* x87 operations */
      Ain_A87LdCW,     /* load x87 control word */
      Ain_A87StSW,     /* store x87 status word */
      Ain_LdMXCSR,     /* load %mxcsr */
      Ain_SseUComIS,   /* ucomisd/ucomiss, then get %rflags into int
                          register */
      Ain_SseSI2SF,    /* scalar 32/64 int to 32/64 float conversion */
      Ain_SseSF2SI,    /* scalar 32/64 float to 32/64 int conversion */
      Ain_SseSDSS,     /* scalar float32 to/from float64 */
      Ain_SseLdSt,     /* SSE load/store 32/64/128 bits, no alignment
                          constraints, upper 96/64/0 bits arbitrary */
      Ain_SseLdzLO,    /* SSE load low 32/64 bits, zero remainder of reg */
      Ain_Sse32Fx4,    /* SSE binary, 32Fx4 */
      Ain_Sse32FLo,    /* SSE binary, 32F in lowest lane only */
      Ain_Sse64Fx2,    /* SSE binary, 64Fx2 */
      Ain_Sse64FLo,    /* SSE binary, 64F in lowest lane only */
      Ain_SseReRg,     /* SSE binary general reg-reg, Re, Rg */
      Ain_SseCMov,     /* SSE conditional move */
      Ain_SseShuf,     /* SSE2 shuffle (pshufd) */
      //uu Ain_AvxLdSt,     /* AVX load/store 256 bits,
      //uu                     no alignment constraints */
      //uu Ain_AvxReRg,     /* AVX binary general reg-reg, Re, Rg */
      Ain_EvCheck,     /* Event check */
      Ain_ProfInc      /* 64-bit profile counter increment */
   }
   AMD64InstrTag;

/* Destinations are on the RIGHT (second operand) */

typedef
   struct {
      AMD64InstrTag tag;
      union {
         struct {
            ULong imm64;
            HReg  dst;
         } Imm64;
         struct {
            AMD64AluOp op;
            AMD64RMI*  src;
            HReg       dst;
         } Alu64R;
         struct {
            AMD64AluOp  op;
            AMD64RI*    src;
            AMD64AMode* dst;
         } Alu64M;
         struct {
            AMD64ShiftOp op;
            UInt         src;  /* shift amount, or 0 means %cl */
            HReg         dst;
         } Sh64;
         struct {
            UInt   imm32;
            HReg   dst;
         } Test64;
         /* Not and Neg */
         struct {
            AMD64UnaryOp op;
            HReg         dst;
         } Unary64;
         /* 64-bit compute EA into a reg */
         struct {
            AMD64AMode* am;
            HReg        dst;
         } Lea64;
         /* 32-bit add/sub/and/or/xor/cmp, dst=REG (a la Alu64R) */
         struct {
            AMD64AluOp op;
            AMD64RMI*  src;
            HReg       dst;
         } Alu32R;
         /* 64 x 64 -> 128 bit widening multiply: RDX:RAX = RAX *s/u
            r/m64 */
         struct {
            Bool     syned;
            AMD64RM* src;
         } MulL;
         /* amd64 div/idiv instruction.  Modifies RDX and RAX and
            reads src. */
         struct {
            Bool     syned;
            Int      sz; /* 4 or 8 only */
            AMD64RM* src;
         } Div;
         struct {
            AMD64RMI* src;
         } Push;
         /* Pseudo-insn.  Call target (an absolute address), on given
            condition (which could be Acc_ALWAYS). */
         struct {
            AMD64CondCode cond;
            Addr64        target;
            Int           regparms; /* 0 .. 6 */
         } Call;
         /* Update the guest RIP value, then exit requesting to chain
            to it.  May be conditional. */
         struct {
            Addr64        dstGA;    /* next guest address */
            AMD64AMode*   amRIP;    /* amode in guest state for RIP */
            AMD64CondCode cond;     /* can be Acc_ALWAYS */
            Bool          toFastEP; /* chain to the slow or fast point? */
         } XDirect;
         /* Boring transfer to a guest address not known at JIT time.
            Not chainable.  May be conditional. */
         struct {
            HReg          dstGA;
            AMD64AMode*   amRIP;
            AMD64CondCode cond; /* can be Acc_ALWAYS */
         } XIndir;
         /* Assisted transfer to a guest address, most general case.
            Not chainable.  May be conditional. */
         struct {
            HReg          dstGA;
            AMD64AMode*   amRIP;
            AMD64CondCode cond; /* can be Acc_ALWAYS */
            IRJumpKind    jk;
         } XAssisted;
         /* Mov src to dst on the given condition, which may not
            be the bogus Acc_ALWAYS. */
         struct {
            AMD64CondCode cond;
            AMD64RM*      src;
            HReg          dst;
         } CMov64;
         /* reg-reg move, sx-ing/zx-ing top half */
         struct {
            Bool syned;
            HReg src;
            HReg dst;
         } MovxLQ;
         /* Sign/Zero extending loads.  Dst size is always 64 bits. */
         struct {
            UChar       szSmall; /* only 1, 2 or 4 */
            Bool        syned;
            AMD64AMode* src;
            HReg        dst;
         } LoadEX;
         /* 32/16/8 bit stores. */
         struct {
            UChar       sz; /* only 1, 2 or 4 */
            HReg        src;
            AMD64AMode* dst;
         } Store;
         /* Convert an amd64 condition code to a 64-bit value (0 or 1). */
         struct {
            AMD64CondCode cond;
            HReg          dst;
         } Set64;
         /* 64-bit bsf or bsr. */
         struct {
            Bool isFwds;
            HReg src;
            HReg dst;
         } Bsfr64;
         /* Mem fence.  In short, an insn which flushes all preceding
            loads and stores as much as possible before continuing.
            On AMD64 we emit a real "mfence". */
         struct {
         } MFence;
         struct {
            AMD64AMode* addr;
            UChar       sz; /* 1, 2, 4 or 8 */
         } ACAS;
         struct {
            AMD64AMode* addr;
            UChar       sz; /* 4 or 8 only */
         } DACAS;

         /* --- X87 --- */

         /* A very minimal set of x87 insns, that operate exactly in a
            stack-like way so no need to think about x87 registers. */

         /* Do 'ffree' on %st(7) .. %st(7-nregs) */
         struct {
            Int nregs; /* 1 <= nregs <= 7 */
         } A87Free;

         /* Push a 32- or 64-bit FP value from memory onto the stack,
            or move a value from the stack to memory and remove it
            from the stack. */
         struct {
            AMD64AMode* addr;
            Bool        isPush;
            UChar       szB; /* 4 or 8 */
         } A87PushPop;

         /* Do an operation on the top-of-stack.  This can be unary, in
            which case it is %st0 = OP( %st0 ), or binary: %st0 = OP(
            %st0, %st1 ). */
         struct {
            A87FpOp op;
         } A87FpOp;

         /* Load the FPU control word. */
         struct {
            AMD64AMode* addr;
         } A87LdCW;

         /* Store the FPU status word (fstsw m16) */
         struct {
            AMD64AMode* addr;
         } A87StSW;

         /* --- SSE --- */

         /* Load 32 bits into %mxcsr. */
         struct {
            AMD64AMode* addr;
         }
         LdMXCSR;
         /* ucomisd/ucomiss, then get %rflags into int register */
         struct {
            UChar   sz;   /* 4 or 8 only */
            HReg    srcL; /* xmm */
            HReg    srcR; /* xmm */
            HReg    dst;  /* int */
         } SseUComIS;
         /* scalar 32/64 int to 32/64 float conversion */
         struct {
            UChar szS; /* 4 or 8 */
            UChar szD; /* 4 or 8 */
            HReg  src; /* i class */
            HReg  dst; /* v class */
         } SseSI2SF;
         /* scalar 32/64 float to 32/64 int conversion */
         struct {
            UChar szS; /* 4 or 8 */
            UChar szD; /* 4 or 8 */
            HReg  src; /* v class */
            HReg  dst; /* i class */
         } SseSF2SI;
         /* scalar float32 to/from float64 */
         struct {
            Bool from64; /* True: 64->32; False: 32->64 */
            HReg src;
            HReg dst;
         } SseSDSS;
         struct {
            Bool        isLoad;
            UChar       sz; /* 4, 8 or 16 only */
            HReg        reg;
            AMD64AMode* addr;
         } SseLdSt;
         struct {
            Int         sz; /* 4 or 8 only */
            HReg        reg;
            AMD64AMode* addr;
         } SseLdzLO;
         struct {
            AMD64SseOp op;
            HReg       src;
            HReg       dst;
         } Sse32Fx4;
         struct {
            AMD64SseOp op;
            HReg       src;
            HReg       dst;
         } Sse32FLo;
         struct {
            AMD64SseOp op;
            HReg       src;
            HReg       dst;
         } Sse64Fx2;
         struct {
            AMD64SseOp op;
            HReg       src;
            HReg       dst;
         } Sse64FLo;
         struct {
            AMD64SseOp op;
            HReg       src;
            HReg       dst;
         } SseReRg;
         /* Mov src to dst on the given condition, which may not
            be the bogus Acc_ALWAYS. */
         struct {
            AMD64CondCode cond;
            HReg          src;
            HReg          dst;
         } SseCMov;
         struct {
            Int    order; /* 0 <= order <= 0xFF */
            HReg   src;
            HReg   dst;
         } SseShuf;
         //uu struct {
         //uu    Bool        isLoad;
         //uu    HReg        reg;
         //uu    AMD64AMode* addr;
         //uu } AvxLdSt;
         //uu struct {
         //uu    AMD64SseOp op;
         //uu    HReg       src;
         //uu    HReg       dst;
         //uu } AvxReRg;
         struct {
            AMD64AMode* amCounter;
            AMD64AMode* amFailAddr;
         } EvCheck;
         struct {
            /* No fields.  The address of the counter to inc is
               installed later, post-translation, by patching it in,
               as it is not known at translation time. */
         } ProfInc;

      } Ain;
   }
   AMD64Instr;

extern AMD64Instr* AMD64Instr_Imm64      ( ULong imm64, HReg dst );
extern AMD64Instr* AMD64Instr_Alu64R     ( AMD64AluOp, AMD64RMI*, HReg );
extern AMD64Instr* AMD64Instr_Alu64M     ( AMD64AluOp, AMD64RI*,  AMD64AMode* );
extern AMD64Instr* AMD64Instr_Unary64    ( AMD64UnaryOp op, HReg dst );
extern AMD64Instr* AMD64Instr_Lea64      ( AMD64AMode* am, HReg dst );
extern AMD64Instr* AMD64Instr_Alu32R     ( AMD64AluOp, AMD64RMI*, HReg );
extern AMD64Instr* AMD64Instr_Sh64       ( AMD64ShiftOp, UInt, HReg );
extern AMD64Instr* AMD64Instr_Test64     ( UInt imm32, HReg dst );
extern AMD64Instr* AMD64Instr_MulL       ( Bool syned, AMD64RM* );
extern AMD64Instr* AMD64Instr_Div        ( Bool syned, Int sz, AMD64RM* );
extern AMD64Instr* AMD64Instr_Push       ( AMD64RMI* );
extern AMD64Instr* AMD64Instr_Call       ( AMD64CondCode, Addr64, Int );
extern AMD64Instr* AMD64Instr_XDirect    ( Addr64 dstGA, AMD64AMode* amRIP,
                                           AMD64CondCode cond, Bool toFastEP );
extern AMD64Instr* AMD64Instr_XIndir     ( HReg dstGA, AMD64AMode* amRIP,
                                           AMD64CondCode cond );
extern AMD64Instr* AMD64Instr_XAssisted  ( HReg dstGA, AMD64AMode* amRIP,
                                           AMD64CondCode cond, IRJumpKind jk );
extern AMD64Instr* AMD64Instr_CMov64     ( AMD64CondCode, AMD64RM* src, HReg dst );
extern AMD64Instr* AMD64Instr_MovxLQ     ( Bool syned, HReg src, HReg dst );
extern AMD64Instr* AMD64Instr_LoadEX     ( UChar szSmall, Bool syned,
                                           AMD64AMode* src, HReg dst );
extern AMD64Instr* AMD64Instr_Store      ( UChar sz, HReg src, AMD64AMode* dst );
extern AMD64Instr* AMD64Instr_Set64      ( AMD64CondCode cond, HReg dst );
extern AMD64Instr* AMD64Instr_Bsfr64     ( Bool isFwds, HReg src, HReg dst );
extern AMD64Instr* AMD64Instr_MFence     ( void );
extern AMD64Instr* AMD64Instr_ACAS       ( AMD64AMode* addr, UChar sz );
extern AMD64Instr* AMD64Instr_DACAS      ( AMD64AMode* addr, UChar sz );

extern AMD64Instr* AMD64Instr_A87Free    ( Int nregs );
extern AMD64Instr* AMD64Instr_A87PushPop ( AMD64AMode* addr, Bool isPush, UChar szB );
extern AMD64Instr* AMD64Instr_A87FpOp    ( A87FpOp op );
extern AMD64Instr* AMD64Instr_A87LdCW    ( AMD64AMode* addr );
extern AMD64Instr* AMD64Instr_A87StSW    ( AMD64AMode* addr );
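
/* Example sketch (illustrative only): computing sin(x) on a 64-bit
   float in memory using the stack-like x87 subset above.  'am_x' and
   'am_res' are assumed AMD64AMode*s for the argument and result:

      AMD64Instr_A87Free( 1 );                           // free %st(7)
      AMD64Instr_A87PushPop( am_x, True/*push*/, 8 );    // st0 = x
      AMD64Instr_A87FpOp( Afp_SIN );                     // st0 = sin(st0)
      AMD64Instr_A87PushPop( am_res, False/*pop*/, 8 );  // store st0, pop
*/
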
extern AMD64Instr* AMD64Instr_LdMXCSR    ( AMD64AMode* );
extern AMD64Instr* AMD64Instr_SseUComIS  ( Int sz, HReg srcL, HReg srcR, HReg dst );
extern AMD64Instr* AMD64Instr_SseSI2SF   ( Int szS, Int szD, HReg src, HReg dst );
extern AMD64Instr* AMD64Instr_SseSF2SI   ( Int szS, Int szD, HReg src, HReg dst );
extern AMD64Instr* AMD64Instr_SseSDSS    ( Bool from64, HReg src, HReg dst );
extern AMD64Instr* AMD64Instr_SseLdSt    ( Bool isLoad, Int sz, HReg, AMD64AMode* );
extern AMD64Instr* AMD64Instr_SseLdzLO   ( Int sz, HReg, AMD64AMode* );
extern AMD64Instr* AMD64Instr_Sse32Fx4   ( AMD64SseOp, HReg, HReg );
extern AMD64Instr* AMD64Instr_Sse32FLo   ( AMD64SseOp, HReg, HReg );
extern AMD64Instr* AMD64Instr_Sse64Fx2   ( AMD64SseOp, HReg, HReg );
extern AMD64Instr* AMD64Instr_Sse64FLo   ( AMD64SseOp, HReg, HReg );
extern AMD64Instr* AMD64Instr_SseReRg    ( AMD64SseOp, HReg, HReg );
extern AMD64Instr* AMD64Instr_SseCMov    ( AMD64CondCode, HReg src, HReg dst );
extern AMD64Instr* AMD64Instr_SseShuf    ( Int order, HReg src, HReg dst );
//uu extern AMD64Instr* AMD64Instr_AvxLdSt    ( Bool isLoad, HReg, AMD64AMode* );
//uu extern AMD64Instr* AMD64Instr_AvxReRg    ( AMD64SseOp, HReg, HReg );
extern AMD64Instr* AMD64Instr_EvCheck    ( AMD64AMode* amCounter,
                                           AMD64AMode* amFailAddr );
extern AMD64Instr* AMD64Instr_ProfInc    ( void );

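/* Example sketch (illustrative only): generating "dst = src + 42",
   where 'src' and 'dst' stand for assumed virtual HRegs.  Note that,
   per the comment above the union, destinations are the RIGHT
   (second) operand:

      AMD64Instr_Alu64R( Aalu_MOV, AMD64RMI_Reg(src), dst );  // movq src, dst
      AMD64Instr_Alu64R( Aalu_ADD, AMD64RMI_Imm(42), dst );   // addq $42, dst
*/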

extern void ppAMD64Instr ( AMD64Instr*, Bool );

/* Some functions that insulate the register allocator from details
   of the underlying instruction set. */
extern void         getRegUsage_AMD64Instr ( HRegUsage*, AMD64Instr*, Bool );
extern void         mapRegs_AMD64Instr     ( HRegRemap*, AMD64Instr*, Bool );
extern Bool         isMove_AMD64Instr      ( AMD64Instr*, HReg*, HReg* );
extern Int          emit_AMD64Instr        ( /*MB_MOD*/Bool* is_profInc,
                                             UChar* buf, Int nbuf, AMD64Instr* i,
                                             Bool mode64,
                                             void* disp_cp_chain_me_to_slowEP,
                                             void* disp_cp_chain_me_to_fastEP,
                                             void* disp_cp_xindir,
                                             void* disp_cp_xassisted );

extern void genSpill_AMD64  ( /*OUT*/HInstr** i1, /*OUT*/HInstr** i2,
                              HReg rreg, Int offset, Bool );
extern void genReload_AMD64 ( /*OUT*/HInstr** i1, /*OUT*/HInstr** i2,
                              HReg rreg, Int offset, Bool );

extern void         getAllocableRegs_AMD64 ( Int*, HReg** );
extern HInstrArray* iselSB_AMD64           ( IRSB*,
                                             VexArch,
                                             VexArchInfo*,
                                             VexAbiInfo*,
                                             Int offs_Host_EvC_Counter,
                                             Int offs_Host_EvC_FailAddr,
                                             Bool chainingAllowed,
                                             Bool addProfInc,
                                             Addr64 max_ga );

/* How big is an event check?  This is kind of a kludge because it
   depends on the offsets of host_EvC_FAILADDR and host_EvC_COUNTER,
   and so assumes that they are both <= 128, and so can use the short
   offset encoding.  This is all checked with assertions, so in the
   worst case we will merely assert at startup. */
extern Int evCheckSzB_AMD64 ( void );

/* Chain and unchain an XDirect jump. */
extern VexInvalRange chainXDirect_AMD64 ( void* place_to_chain,
                                          void* disp_cp_chain_me_EXPECTED,
                                          void* place_to_jump_to );

extern VexInvalRange unchainXDirect_AMD64 ( void* place_to_unchain,
                                            void* place_to_jump_to_EXPECTED,
                                            void* disp_cp_chain_me );

/* Patch the counter location into an existing ProfInc point. */
extern VexInvalRange patchProfInc_AMD64 ( void*  place_to_patch,
                                          ULong* location_of_counter );


#endif /* ndef __VEX_HOST_AMD64_DEFS_H */

/*---------------------------------------------------------------*/
/*--- end                                   host_amd64_defs.h ---*/
/*---------------------------------------------------------------*/
