
/*---------------------------------------------------------------*/
/*--- begin                                       main_main.c ---*/
/*---------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2004-2013 OpenWorks LLP
      info@open-works.net

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
   02110-1301, USA.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/

#include "libvex.h"
#include "libvex_emnote.h"
#include "libvex_guest_x86.h"
#include "libvex_guest_amd64.h"
#include "libvex_guest_arm.h"
#include "libvex_guest_arm64.h"
#include "libvex_guest_ppc32.h"
#include "libvex_guest_ppc64.h"
#include "libvex_guest_s390x.h"
#include "libvex_guest_mips32.h"
#include "libvex_guest_mips64.h"

#include "main_globals.h"
#include "main_util.h"
#include "host_generic_regs.h"
#include "ir_opt.h"

#include "host_x86_defs.h"
#include "host_amd64_defs.h"
#include "host_ppc_defs.h"
#include "host_arm_defs.h"
#include "host_arm64_defs.h"
#include "host_s390_defs.h"
#include "host_mips_defs.h"

#include "guest_generic_bb_to_IR.h"
#include "guest_x86_defs.h"
#include "guest_amd64_defs.h"
#include "guest_arm_defs.h"
#include "guest_arm64_defs.h"
#include "guest_ppc_defs.h"
#include "guest_s390_defs.h"
#include "guest_mips_defs.h"

#include "host_generic_simd128.h"


/* This file contains the top level interface to the library. */

/* --------- fwds ... --------- */

static Bool   are_valid_hwcaps ( VexArch arch, UInt hwcaps );
static const HChar* show_hwcaps ( VexArch arch, UInt hwcaps );


/* --------- helpers --------- */

__attribute__((noinline))
static UInt udiv32 ( UInt x, UInt y ) { return x/y; }
__attribute__((noinline))
static  Int sdiv32 (  Int x,  Int y ) { return x/y; }


/* --------- Initialise the library. --------- */

/* Exported to library client. */

void LibVEX_default_VexControl ( /*OUT*/ VexControl* vcon )
{
   vex_bzero(vcon, sizeof(*vcon));
   vcon->iropt_verbosity            = 0;
   vcon->iropt_level                = 2;
   vcon->iropt_register_updates     = VexRegUpdUnwindregsAtMemAccess;
   vcon->iropt_unroll_thresh        = 120;
   vcon->guest_max_insns            = 60;
   vcon->guest_chase_thresh         = 10;
   vcon->guest_chase_cond           = False;
}


/* Exported to library client. */

void LibVEX_Init (
   /* failure exit function */
   __attribute__ ((noreturn))
   void (*failure_exit) ( void ),
   /* logging output function */
   void (*log_bytes) ( HChar*, Int nbytes ),
   /* debug paranoia level */
   Int debuglevel,
   /* Are we supporting valgrind checking? */
   Bool valgrind_support,
   /* Control ... */
   /*READONLY*/VexControl* vcon
)
{
   /* First off, do enough minimal setup so that the following
      assertions can fail in a sane fashion, if need be. */
   vex_failure_exit = failure_exit;
   vex_log_bytes    = log_bytes;

   /* Now it's safe to check parameters for sanity. */
   vassert(!vex_initdone);
   vassert(failure_exit);
   vassert(log_bytes);
   vassert(debuglevel >= 0);

   vassert(vcon->iropt_verbosity >= 0);
   vassert(vcon->iropt_level >= 0);
   vassert(vcon->iropt_level <= 2);
   vassert(vcon->iropt_unroll_thresh >= 0);
   vassert(vcon->iropt_unroll_thresh <= 400);
   vassert(vcon->guest_max_insns >= 1);
   vassert(vcon->guest_max_insns <= 100);
   vassert(vcon->guest_chase_thresh >= 0);
   vassert(vcon->guest_chase_thresh < vcon->guest_max_insns);
   vassert(vcon->guest_chase_cond == True
           || vcon->guest_chase_cond == False);

   /* Check that Vex has been built with sizes of basic types as
      stated in priv/libvex_basictypes.h.  Failure of any of these is
      a serious configuration error and should be corrected
      immediately.  If any of these assertions fail you can fully
      expect Vex not to work properly, if at all. */

   vassert(1 == sizeof(UChar));
   vassert(1 == sizeof(Char));
   vassert(2 == sizeof(UShort));
   vassert(2 == sizeof(Short));
   vassert(4 == sizeof(UInt));
   vassert(4 == sizeof(Int));
   vassert(8 == sizeof(ULong));
   vassert(8 == sizeof(Long));
   vassert(4 == sizeof(Float));
   vassert(8 == sizeof(Double));
   vassert(1 == sizeof(Bool));
   vassert(4 == sizeof(Addr32));
   vassert(8 == sizeof(Addr64));
   vassert(16 == sizeof(U128));
   vassert(16 == sizeof(V128));
   vassert(32 == sizeof(U256));

   vassert(sizeof(void*) == 4 || sizeof(void*) == 8);
   vassert(sizeof(void*) == sizeof(int*));
   vassert(sizeof(void*) == sizeof(HWord));

   vassert(VEX_HOST_WORDSIZE == sizeof(void*));
   vassert(VEX_HOST_WORDSIZE == sizeof(HWord));

   /* These take a lot of space, so make sure we don't have
      any unnoticed size regressions. */
   if (VEX_HOST_WORDSIZE == 4) {
      vassert(sizeof(IRExpr) == 16);
      vassert(sizeof(IRStmt) == 20 /* x86 */
              || sizeof(IRStmt) == 24 /* arm */);
   } else {
      vassert(sizeof(IRExpr) == 32);
      vassert(sizeof(IRStmt) == 32);
   }

   /* Check that signed integer division on the host rounds towards
      zero.  If not, h_calc_sdiv32_w_arm_semantics() won't work
      correctly. */
   /* 100.0 / 7.0 == 14.2857 */
   vassert(udiv32(100, 7) == 14);
   vassert(sdiv32(100, 7) == 14);
   vassert(sdiv32(-100, 7) == -14); /* and not -15 */
   vassert(sdiv32(100, -7) == -14); /* ditto */
   vassert(sdiv32(-100, -7) == 14); /* not sure what this proves */

   /* Really start up .. */
   vex_debuglevel         = debuglevel;
   vex_valgrind_support   = valgrind_support;
   vex_control            = *vcon;
   vex_initdone           = True;
   vexSetAllocMode ( VexAllocModeTEMP );
}
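
/* Example of client start-up, for illustration only.  The two
   callbacks shown (my_failure_exit, my_log_bytes) are hypothetical
   client-side functions, not part of the library:

      static __attribute__((noreturn)) void my_failure_exit ( void )
      {
         abort();   // or longjmp back to a safe point
      }
      static void my_log_bytes ( HChar* bytes, Int nbytes )
      {
         // write 'nbytes' bytes starting at 'bytes' to some log sink
      }

      VexControl vcon;
      LibVEX_default_VexControl(&vcon);
      vcon.iropt_level = 2;   // adjust defaults here if desired
      LibVEX_Init(my_failure_exit, my_log_bytes,
                  0 (debuglevel), False (valgrind_support), &vcon);
*/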


/* --------- Make a translation. --------- */

/* Exported to library client. */

VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
{
   /* This is the bundle of functions we need to do the back-end stuff
      (insn selection, reg-alloc, assembly) whilst being insulated
      from the target instruction set. */
   HReg* available_real_regs;
   Int   n_available_real_regs;
   Bool         (*isMove)       ( HInstr*, HReg*, HReg* );
   void         (*getRegUsage)  ( HRegUsage*, HInstr*, Bool );
   void         (*mapRegs)      ( HRegRemap*, HInstr*, Bool );
   void         (*genSpill)     ( HInstr**, HInstr**, HReg, Int, Bool );
   void         (*genReload)    ( HInstr**, HInstr**, HReg, Int, Bool );
   HInstr*      (*directReload) ( HInstr*, HReg, Short );
   void         (*ppInstr)      ( HInstr*, Bool );
   void         (*ppReg)        ( HReg );
   HInstrArray* (*iselSB)       ( IRSB*, VexArch, VexArchInfo*, VexAbiInfo*,
                                  Int, Int, Bool, Bool, Addr64 );
   Int          (*emit)         ( /*MB_MOD*/Bool*,
                                  UChar*, Int, HInstr*, Bool,
                                  void*, void*, void*, void* );
   IRExpr*      (*specHelper)   ( const HChar*, IRExpr**, IRStmt**, Int );
   Bool         (*preciseMemExnsFn) ( Int, Int );

   DisOneInstrFn disInstrFn;

   VexGuestLayout* guest_layout;
   Bool            host_is_bigendian = False;
   IRSB*           irsb;
   HInstrArray*    vcode;
   HInstrArray*    rcode;
   Int             i, j, k, out_used, guest_sizeB;
   Int             offB_CMSTART, offB_CMLEN, offB_GUEST_IP, szB_GUEST_IP;
   Int             offB_HOST_EvC_COUNTER, offB_HOST_EvC_FAILADDR;
   UChar           insn_bytes[128];
   IRType          guest_word_type;
   IRType          host_word_type;
   Bool            mode64, chainingAllowed;
   Addr64          max_ga;

   guest_layout           = NULL;
   available_real_regs    = NULL;
   n_available_real_regs  = 0;
   isMove                 = NULL;
   getRegUsage            = NULL;
   mapRegs                = NULL;
   genSpill               = NULL;
   genReload              = NULL;
   directReload           = NULL;
   ppInstr                = NULL;
   ppReg                  = NULL;
   iselSB                 = NULL;
   emit                   = NULL;
   specHelper             = NULL;
   preciseMemExnsFn       = NULL;
   disInstrFn             = NULL;
   guest_word_type        = Ity_INVALID;
   host_word_type         = Ity_INVALID;
   offB_CMSTART           = 0;
   offB_CMLEN             = 0;
   offB_GUEST_IP          = 0;
   szB_GUEST_IP           = 0;
   offB_HOST_EvC_COUNTER  = 0;
   offB_HOST_EvC_FAILADDR = 0;
   mode64                 = False;
   chainingAllowed        = False;

   vex_traceflags = vta->traceflags;

   vassert(vex_initdone);
   vassert(vta->needs_self_check  != NULL);
   vassert(vta->disp_cp_xassisted != NULL);
   /* Both the chainers and the indir are either NULL or non-NULL. */
   if (vta->disp_cp_chain_me_to_slowEP        != NULL) {
      vassert(vta->disp_cp_chain_me_to_fastEP != NULL);
      vassert(vta->disp_cp_xindir             != NULL);
      chainingAllowed = True;
   } else {
      vassert(vta->disp_cp_chain_me_to_fastEP == NULL);
      vassert(vta->disp_cp_xindir             == NULL);
   }

   vexSetAllocModeTEMP_and_clear();
   vexAllocSanityCheck();

   /* First off, check that the guest and host insn sets
      are supported. */

   switch (vta->arch_host) {

      case VexArchX86:
         mode64       = False;
         getAllocableRegs_X86 ( &n_available_real_regs,
                                &available_real_regs );
         isMove       = (Bool(*)(HInstr*,HReg*,HReg*)) isMove_X86Instr;
         getRegUsage  = (void(*)(HRegUsage*,HInstr*, Bool))
                        getRegUsage_X86Instr;
         mapRegs      = (void(*)(HRegRemap*,HInstr*, Bool)) mapRegs_X86Instr;
         genSpill     = (void(*)(HInstr**,HInstr**,HReg,Int,Bool))
                        genSpill_X86;
         genReload    = (void(*)(HInstr**,HInstr**,HReg,Int,Bool))
                        genReload_X86;
         directReload = (HInstr*(*)(HInstr*,HReg,Short)) directReload_X86;
         ppInstr      = (void(*)(HInstr*, Bool)) ppX86Instr;
         ppReg        = (void(*)(HReg)) ppHRegX86;
         iselSB       = iselSB_X86;
         emit         = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,
                               void*,void*,void*,void*))
                        emit_X86Instr;
         host_is_bigendian = False;
         host_word_type    = Ity_I32;
         vassert(are_valid_hwcaps(VexArchX86, vta->archinfo_host.hwcaps));
         break;

      case VexArchAMD64:
         mode64      = True;
         getAllocableRegs_AMD64 ( &n_available_real_regs,
                                  &available_real_regs );
         isMove      = (Bool(*)(HInstr*,HReg*,HReg*)) isMove_AMD64Instr;
         getRegUsage = (void(*)(HRegUsage*,HInstr*, Bool))
                       getRegUsage_AMD64Instr;
         mapRegs     = (void(*)(HRegRemap*,HInstr*, Bool)) mapRegs_AMD64Instr;
         genSpill    = (void(*)(HInstr**,HInstr**,HReg,Int,Bool))
                       genSpill_AMD64;
         genReload   = (void(*)(HInstr**,HInstr**,HReg,Int,Bool))
                       genReload_AMD64;
         ppInstr     = (void(*)(HInstr*, Bool)) ppAMD64Instr;
         ppReg       = (void(*)(HReg)) ppHRegAMD64;
         iselSB      = iselSB_AMD64;
         emit        = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,
                               void*,void*,void*,void*))
                       emit_AMD64Instr;
         host_is_bigendian = False;
         host_word_type    = Ity_I64;
         vassert(are_valid_hwcaps(VexArchAMD64, vta->archinfo_host.hwcaps));
         break;

      case VexArchPPC32:
         mode64      = False;
         getAllocableRegs_PPC ( &n_available_real_regs,
                                &available_real_regs, mode64 );
         isMove      = (Bool(*)(HInstr*,HReg*,HReg*)) isMove_PPCInstr;
         getRegUsage = (void(*)(HRegUsage*,HInstr*,Bool)) getRegUsage_PPCInstr;
         mapRegs     = (void(*)(HRegRemap*,HInstr*,Bool)) mapRegs_PPCInstr;
         genSpill    = (void(*)(HInstr**,HInstr**,HReg,Int,Bool)) genSpill_PPC;
         genReload   = (void(*)(HInstr**,HInstr**,HReg,Int,Bool)) genReload_PPC;
         ppInstr     = (void(*)(HInstr*,Bool)) ppPPCInstr;
         ppReg       = (void(*)(HReg)) ppHRegPPC;
         iselSB      = iselSB_PPC;
         emit        = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,
                               void*,void*,void*,void*))
                       emit_PPCInstr;
         host_is_bigendian = True;
         host_word_type    = Ity_I32;
         vassert(are_valid_hwcaps(VexArchPPC32, vta->archinfo_host.hwcaps));
         break;

      case VexArchPPC64:
         mode64      = True;
         getAllocableRegs_PPC ( &n_available_real_regs,
                                &available_real_regs, mode64 );
         isMove      = (Bool(*)(HInstr*,HReg*,HReg*)) isMove_PPCInstr;
         getRegUsage = (void(*)(HRegUsage*,HInstr*, Bool)) getRegUsage_PPCInstr;
         mapRegs     = (void(*)(HRegRemap*,HInstr*, Bool)) mapRegs_PPCInstr;
         genSpill    = (void(*)(HInstr**,HInstr**,HReg,Int,Bool)) genSpill_PPC;
         genReload   = (void(*)(HInstr**,HInstr**,HReg,Int,Bool)) genReload_PPC;
         ppInstr     = (void(*)(HInstr*, Bool)) ppPPCInstr;
         ppReg       = (void(*)(HReg)) ppHRegPPC;
         iselSB      = iselSB_PPC;
         emit        = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,
                               void*,void*,void*,void*))
                       emit_PPCInstr;
         host_is_bigendian = True;
         host_word_type    = Ity_I64;
         vassert(are_valid_hwcaps(VexArchPPC64, vta->archinfo_host.hwcaps));
         break;

      case VexArchS390X:
         mode64      = True;
         getAllocableRegs_S390 ( &n_available_real_regs,
                                 &available_real_regs, mode64 );
         isMove      = (Bool(*)(HInstr*,HReg*,HReg*)) isMove_S390Instr;
         getRegUsage = (void(*)(HRegUsage*,HInstr*, Bool)) getRegUsage_S390Instr;
         mapRegs     = (void(*)(HRegRemap*,HInstr*, Bool)) mapRegs_S390Instr;
         genSpill    = (void(*)(HInstr**,HInstr**,HReg,Int,Bool)) genSpill_S390;
         genReload   = (void(*)(HInstr**,HInstr**,HReg,Int,Bool)) genReload_S390;
         ppInstr     = (void(*)(HInstr*, Bool)) ppS390Instr;
         ppReg       = (void(*)(HReg)) ppHRegS390;
         iselSB      = iselSB_S390;
         emit        = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,
                               void*,void*,void*,void*)) emit_S390Instr;
         host_is_bigendian = True;
         host_word_type    = Ity_I64;
         vassert(are_valid_hwcaps(VexArchS390X, vta->archinfo_host.hwcaps));
         break;

      case VexArchARM:
         mode64      = False;
         getAllocableRegs_ARM ( &n_available_real_regs,
                                &available_real_regs );
         isMove      = (Bool(*)(HInstr*,HReg*,HReg*)) isMove_ARMInstr;
         getRegUsage = (void(*)(HRegUsage*,HInstr*, Bool)) getRegUsage_ARMInstr;
         mapRegs     = (void(*)(HRegRemap*,HInstr*, Bool)) mapRegs_ARMInstr;
         genSpill    = (void(*)(HInstr**,HInstr**,HReg,Int,Bool)) genSpill_ARM;
         genReload   = (void(*)(HInstr**,HInstr**,HReg,Int,Bool)) genReload_ARM;
         ppInstr     = (void(*)(HInstr*, Bool)) ppARMInstr;
         ppReg       = (void(*)(HReg)) ppHRegARM;
         iselSB      = iselSB_ARM;
         emit        = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,
                               void*,void*,void*,void*))
                       emit_ARMInstr;
         host_is_bigendian = False;
         host_word_type    = Ity_I32;
         vassert(are_valid_hwcaps(VexArchARM, vta->archinfo_host.hwcaps));
         break;

      case VexArchARM64:
         mode64      = True;
         getAllocableRegs_ARM64 ( &n_available_real_regs,
                                  &available_real_regs );
         isMove      = (Bool(*)(HInstr*,HReg*,HReg*)) isMove_ARM64Instr;
         getRegUsage = (void(*)(HRegUsage*,HInstr*, Bool))
                       getRegUsage_ARM64Instr;
         mapRegs     = (void(*)(HRegRemap*,HInstr*, Bool))
                       mapRegs_ARM64Instr;
         genSpill    = (void(*)(HInstr**,HInstr**,HReg,Int,Bool))
                       genSpill_ARM64;
         genReload   = (void(*)(HInstr**,HInstr**,HReg,Int,Bool))
                       genReload_ARM64;
         ppInstr     = (void(*)(HInstr*, Bool)) ppARM64Instr;
         ppReg       = (void(*)(HReg)) ppHRegARM64;
         iselSB      = iselSB_ARM64;
         emit        = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,
                               void*,void*,void*,void*))
                       emit_ARM64Instr;
         host_is_bigendian = False;
         host_word_type    = Ity_I64;
         vassert(are_valid_hwcaps(VexArchARM64, vta->archinfo_host.hwcaps));
         break;

      case VexArchMIPS32:
         mode64      = False;
         getAllocableRegs_MIPS ( &n_available_real_regs,
                                 &available_real_regs, mode64 );
         isMove      = (Bool(*)(HInstr*,HReg*,HReg*)) isMove_MIPSInstr;
         getRegUsage = (void(*)(HRegUsage*,HInstr*, Bool)) getRegUsage_MIPSInstr;
         mapRegs     = (void(*)(HRegRemap*,HInstr*, Bool)) mapRegs_MIPSInstr;
         genSpill    = (void(*)(HInstr**,HInstr**,HReg,Int,Bool)) genSpill_MIPS;
         genReload   = (void(*)(HInstr**,HInstr**,HReg,Int,Bool)) genReload_MIPS;
         ppInstr     = (void(*)(HInstr*, Bool)) ppMIPSInstr;
         ppReg       = (void(*)(HReg)) ppHRegMIPS;
         iselSB      = iselSB_MIPS;
         emit        = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,
                               void*,void*,void*,void*))
                       emit_MIPSInstr;
#        if defined(VKI_LITTLE_ENDIAN)
         host_is_bigendian = False;
#        elif defined(VKI_BIG_ENDIAN)
         host_is_bigendian = True;
#        endif
         host_word_type    = Ity_I32;
         vassert(are_valid_hwcaps(VexArchMIPS32, vta->archinfo_host.hwcaps));
         break;

      case VexArchMIPS64:
         mode64      = True;
         getAllocableRegs_MIPS ( &n_available_real_regs,
                                 &available_real_regs, mode64 );
         isMove      = (Bool(*)(HInstr*,HReg*,HReg*)) isMove_MIPSInstr;
         getRegUsage = (void(*)(HRegUsage*,HInstr*, Bool)) getRegUsage_MIPSInstr;
         mapRegs     = (void(*)(HRegRemap*,HInstr*, Bool)) mapRegs_MIPSInstr;
         genSpill    = (void(*)(HInstr**,HInstr**,HReg,Int,Bool)) genSpill_MIPS;
         genReload   = (void(*)(HInstr**,HInstr**,HReg,Int,Bool)) genReload_MIPS;
         ppInstr     = (void(*)(HInstr*, Bool)) ppMIPSInstr;
         ppReg       = (void(*)(HReg)) ppHRegMIPS;
         iselSB      = iselSB_MIPS;
         emit        = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,
                               void*,void*,void*,void*))
                       emit_MIPSInstr;
#        if defined(VKI_LITTLE_ENDIAN)
         host_is_bigendian = False;
#        elif defined(VKI_BIG_ENDIAN)
         host_is_bigendian = True;
#        endif
         host_word_type    = Ity_I64;
         vassert(are_valid_hwcaps(VexArchMIPS64, vta->archinfo_host.hwcaps));
         break;

      default:
         vpanic("LibVEX_Translate: unsupported host insn set");
   }


   switch (vta->arch_guest) {

      case VexArchX86:
         preciseMemExnsFn       = guest_x86_state_requires_precise_mem_exns;
         disInstrFn             = disInstr_X86;
         specHelper             = guest_x86_spechelper;
         guest_sizeB            = sizeof(VexGuestX86State);
         guest_word_type        = Ity_I32;
         guest_layout           = &x86guest_layout;
         offB_CMSTART           = offsetof(VexGuestX86State,guest_CMSTART);
         offB_CMLEN             = offsetof(VexGuestX86State,guest_CMLEN);
         offB_GUEST_IP          = offsetof(VexGuestX86State,guest_EIP);
         szB_GUEST_IP           = sizeof( ((VexGuestX86State*)0)->guest_EIP );
         offB_HOST_EvC_COUNTER  = offsetof(VexGuestX86State,host_EvC_COUNTER);
         offB_HOST_EvC_FAILADDR = offsetof(VexGuestX86State,host_EvC_FAILADDR);
         vassert(are_valid_hwcaps(VexArchX86, vta->archinfo_guest.hwcaps));
         vassert(0 == sizeof(VexGuestX86State) % 16);
         vassert(sizeof( ((VexGuestX86State*)0)->guest_CMSTART) == 4);
         vassert(sizeof( ((VexGuestX86State*)0)->guest_CMLEN  ) == 4);
         vassert(sizeof( ((VexGuestX86State*)0)->guest_NRADDR ) == 4);
         break;

      case VexArchAMD64:
         preciseMemExnsFn       = guest_amd64_state_requires_precise_mem_exns;
         disInstrFn             = disInstr_AMD64;
         specHelper             = guest_amd64_spechelper;
         guest_sizeB            = sizeof(VexGuestAMD64State);
         guest_word_type        = Ity_I64;
         guest_layout           = &amd64guest_layout;
         offB_CMSTART           = offsetof(VexGuestAMD64State,guest_CMSTART);
         offB_CMLEN             = offsetof(VexGuestAMD64State,guest_CMLEN);
         offB_GUEST_IP          = offsetof(VexGuestAMD64State,guest_RIP);
         szB_GUEST_IP           = sizeof( ((VexGuestAMD64State*)0)->guest_RIP );
         offB_HOST_EvC_COUNTER  = offsetof(VexGuestAMD64State,host_EvC_COUNTER);
         offB_HOST_EvC_FAILADDR = offsetof(VexGuestAMD64State,host_EvC_FAILADDR);
         vassert(are_valid_hwcaps(VexArchAMD64, vta->archinfo_guest.hwcaps));
         vassert(0 == sizeof(VexGuestAMD64State) % 16);
         vassert(sizeof( ((VexGuestAMD64State*)0)->guest_CMSTART ) == 8);
         vassert(sizeof( ((VexGuestAMD64State*)0)->guest_CMLEN   ) == 8);
         vassert(sizeof( ((VexGuestAMD64State*)0)->guest_NRADDR  ) == 8);
         break;

      case VexArchPPC32:
         preciseMemExnsFn       = guest_ppc32_state_requires_precise_mem_exns;
         disInstrFn             = disInstr_PPC;
         specHelper             = guest_ppc32_spechelper;
         guest_sizeB            = sizeof(VexGuestPPC32State);
         guest_word_type        = Ity_I32;
         guest_layout           = &ppc32Guest_layout;
         offB_CMSTART           = offsetof(VexGuestPPC32State,guest_CMSTART);
         offB_CMLEN             = offsetof(VexGuestPPC32State,guest_CMLEN);
         offB_GUEST_IP          = offsetof(VexGuestPPC32State,guest_CIA);
         szB_GUEST_IP           = sizeof( ((VexGuestPPC32State*)0)->guest_CIA );
         offB_HOST_EvC_COUNTER  = offsetof(VexGuestPPC32State,host_EvC_COUNTER);
         offB_HOST_EvC_FAILADDR = offsetof(VexGuestPPC32State,host_EvC_FAILADDR);
         vassert(are_valid_hwcaps(VexArchPPC32, vta->archinfo_guest.hwcaps));
         vassert(0 == sizeof(VexGuestPPC32State) % 16);
         vassert(sizeof( ((VexGuestPPC32State*)0)->guest_CMSTART ) == 4);
         vassert(sizeof( ((VexGuestPPC32State*)0)->guest_CMLEN   ) == 4);
         vassert(sizeof( ((VexGuestPPC32State*)0)->guest_NRADDR  ) == 4);
         break;

      case VexArchPPC64:
         preciseMemExnsFn       = guest_ppc64_state_requires_precise_mem_exns;
         disInstrFn             = disInstr_PPC;
         specHelper             = guest_ppc64_spechelper;
         guest_sizeB            = sizeof(VexGuestPPC64State);
         guest_word_type        = Ity_I64;
         guest_layout           = &ppc64Guest_layout;
         offB_CMSTART           = offsetof(VexGuestPPC64State,guest_CMSTART);
         offB_CMLEN             = offsetof(VexGuestPPC64State,guest_CMLEN);
         offB_GUEST_IP          = offsetof(VexGuestPPC64State,guest_CIA);
         szB_GUEST_IP           = sizeof( ((VexGuestPPC64State*)0)->guest_CIA );
         offB_HOST_EvC_COUNTER  = offsetof(VexGuestPPC64State,host_EvC_COUNTER);
         offB_HOST_EvC_FAILADDR = offsetof(VexGuestPPC64State,host_EvC_FAILADDR);
         vassert(are_valid_hwcaps(VexArchPPC64, vta->archinfo_guest.hwcaps));
         vassert(0 == sizeof(VexGuestPPC64State) % 16);
         vassert(sizeof( ((VexGuestPPC64State*)0)->guest_CMSTART    ) == 8);
         vassert(sizeof( ((VexGuestPPC64State*)0)->guest_CMLEN      ) == 8);
         vassert(sizeof( ((VexGuestPPC64State*)0)->guest_NRADDR     ) == 8);
         vassert(sizeof( ((VexGuestPPC64State*)0)->guest_NRADDR_GPR2) == 8);
         break;

      case VexArchS390X:
         preciseMemExnsFn = guest_s390x_state_requires_precise_mem_exns;
         disInstrFn       = disInstr_S390;
         specHelper       = guest_s390x_spechelper;
         guest_sizeB      = sizeof(VexGuestS390XState);
         guest_word_type  = Ity_I64;
         guest_layout     = &s390xGuest_layout;
         offB_CMSTART     = offsetof(VexGuestS390XState,guest_CMSTART);
         offB_CMLEN       = offsetof(VexGuestS390XState,guest_CMLEN);
         offB_GUEST_IP          = offsetof(VexGuestS390XState,guest_IA);
         szB_GUEST_IP           = sizeof( ((VexGuestS390XState*)0)->guest_IA);
         offB_HOST_EvC_COUNTER  = offsetof(VexGuestS390XState,host_EvC_COUNTER);
         offB_HOST_EvC_FAILADDR = offsetof(VexGuestS390XState,host_EvC_FAILADDR);
         vassert(are_valid_hwcaps(VexArchS390X, vta->archinfo_guest.hwcaps));
         vassert(0 == sizeof(VexGuestS390XState) % 16);
         vassert(sizeof( ((VexGuestS390XState*)0)->guest_CMSTART    ) == 8);
         vassert(sizeof( ((VexGuestS390XState*)0)->guest_CMLEN      ) == 8);
         vassert(sizeof( ((VexGuestS390XState*)0)->guest_NRADDR     ) == 8);
         break;

      case VexArchARM:
         preciseMemExnsFn       = guest_arm_state_requires_precise_mem_exns;
         disInstrFn             = disInstr_ARM;
         specHelper             = guest_arm_spechelper;
         guest_sizeB            = sizeof(VexGuestARMState);
         guest_word_type        = Ity_I32;
         guest_layout           = &armGuest_layout;
         offB_CMSTART           = offsetof(VexGuestARMState,guest_CMSTART);
         offB_CMLEN             = offsetof(VexGuestARMState,guest_CMLEN);
         offB_GUEST_IP          = offsetof(VexGuestARMState,guest_R15T);
         szB_GUEST_IP           = sizeof( ((VexGuestARMState*)0)->guest_R15T );
         offB_HOST_EvC_COUNTER  = offsetof(VexGuestARMState,host_EvC_COUNTER);
         offB_HOST_EvC_FAILADDR = offsetof(VexGuestARMState,host_EvC_FAILADDR);
         vassert(are_valid_hwcaps(VexArchARM, vta->archinfo_guest.hwcaps));
         vassert(0 == sizeof(VexGuestARMState) % 16);
         vassert(sizeof( ((VexGuestARMState*)0)->guest_CMSTART) == 4);
         vassert(sizeof( ((VexGuestARMState*)0)->guest_CMLEN  ) == 4);
         vassert(sizeof( ((VexGuestARMState*)0)->guest_NRADDR ) == 4);
         break;

      case VexArchARM64:
         preciseMemExnsFn     = guest_arm64_state_requires_precise_mem_exns;
         disInstrFn           = disInstr_ARM64;
         specHelper           = guest_arm64_spechelper;
         guest_sizeB          = sizeof(VexGuestARM64State);
         guest_word_type      = Ity_I64;
         guest_layout         = &arm64Guest_layout;
         offB_CMSTART         = offsetof(VexGuestARM64State,guest_CMSTART);
         offB_CMLEN           = offsetof(VexGuestARM64State,guest_CMLEN);
         offB_GUEST_IP        = offsetof(VexGuestARM64State,guest_PC);
         szB_GUEST_IP         = sizeof( ((VexGuestARM64State*)0)->guest_PC );
         offB_HOST_EvC_COUNTER  = offsetof(VexGuestARM64State,host_EvC_COUNTER);
         offB_HOST_EvC_FAILADDR = offsetof(VexGuestARM64State,host_EvC_FAILADDR);
         vassert(are_valid_hwcaps(VexArchARM64, vta->archinfo_guest.hwcaps));
         vassert(0 == sizeof(VexGuestARM64State) % 16);
         vassert(sizeof( ((VexGuestARM64State*)0)->guest_CMSTART) == 8);
         vassert(sizeof( ((VexGuestARM64State*)0)->guest_CMLEN  ) == 8);
         vassert(sizeof( ((VexGuestARM64State*)0)->guest_NRADDR ) == 8);
         break;

      case VexArchMIPS32:
         preciseMemExnsFn       = guest_mips32_state_requires_precise_mem_exns;
         disInstrFn             = disInstr_MIPS;
         specHelper             = guest_mips32_spechelper;
         guest_sizeB            = sizeof(VexGuestMIPS32State);
         guest_word_type        = Ity_I32;
         guest_layout           = &mips32Guest_layout;
         offB_CMSTART           = offsetof(VexGuestMIPS32State,guest_CMSTART);
         offB_CMLEN             = offsetof(VexGuestMIPS32State,guest_CMLEN);
         offB_GUEST_IP          = offsetof(VexGuestMIPS32State,guest_PC);
         szB_GUEST_IP           = sizeof( ((VexGuestMIPS32State*)0)->guest_PC );
         offB_HOST_EvC_COUNTER  = offsetof(VexGuestMIPS32State,host_EvC_COUNTER);
         offB_HOST_EvC_FAILADDR = offsetof(VexGuestMIPS32State,host_EvC_FAILADDR);
         vassert(are_valid_hwcaps(VexArchMIPS32, vta->archinfo_guest.hwcaps));
         vassert(0 == sizeof(VexGuestMIPS32State) % 16);
         vassert(sizeof( ((VexGuestMIPS32State*)0)->guest_CMSTART) == 4);
         vassert(sizeof( ((VexGuestMIPS32State*)0)->guest_CMLEN  ) == 4);
         vassert(sizeof( ((VexGuestMIPS32State*)0)->guest_NRADDR ) == 4);
         break;

      case VexArchMIPS64:
         preciseMemExnsFn       = guest_mips64_state_requires_precise_mem_exns;
         disInstrFn             = disInstr_MIPS;
         specHelper             = guest_mips64_spechelper;
         guest_sizeB            = sizeof(VexGuestMIPS64State);
         guest_word_type        = Ity_I64;
         guest_layout           = &mips64Guest_layout;
         offB_CMSTART           = offsetof(VexGuestMIPS64State,guest_CMSTART);
         offB_CMLEN             = offsetof(VexGuestMIPS64State,guest_CMLEN);
         offB_GUEST_IP          = offsetof(VexGuestMIPS64State,guest_PC);
         szB_GUEST_IP           = sizeof( ((VexGuestMIPS64State*)0)->guest_PC );
         offB_HOST_EvC_COUNTER  = offsetof(VexGuestMIPS64State,host_EvC_COUNTER);
         offB_HOST_EvC_FAILADDR = offsetof(VexGuestMIPS64State,host_EvC_FAILADDR);
         vassert(are_valid_hwcaps(VexArchMIPS64, vta->archinfo_guest.hwcaps));
         vassert(0 == sizeof(VexGuestMIPS64State) % 16);
         vassert(sizeof( ((VexGuestMIPS64State*)0)->guest_CMSTART) == 8);
         vassert(sizeof( ((VexGuestMIPS64State*)0)->guest_CMLEN  ) == 8);
         vassert(sizeof( ((VexGuestMIPS64State*)0)->guest_NRADDR ) == 8);
         break;

      default:
         vpanic("LibVEX_Translate: unsupported guest insn set");
   }

   /* Set up result struct. */
   VexTranslateResult res;
   res.status         = VexTransOK;
   res.n_sc_extents   = 0;
   res.offs_profInc   = -1;
   res.n_guest_instrs = 0;

   /* yet more sanity checks ... */
   if (vta->arch_guest == vta->arch_host) {
      /* doesn't necessarily have to be true, but if it isn't, it means
         we are simulating one flavour of an architecture using a
         different flavour of the same architecture, which is pretty
         strange. */
      vassert(vta->archinfo_guest.hwcaps == vta->archinfo_host.hwcaps);
   }

   vexAllocSanityCheck();

   if (vex_traceflags & VEX_TRACE_FE)
      vex_printf("\n------------------------"
                   " Front end "
                   "------------------------\n\n");

   irsb = bb_to_IR ( vta->guest_extents,
                     &res.n_sc_extents,
                     &res.n_guest_instrs,
                     vta->callback_opaque,
                     disInstrFn,
                     vta->guest_bytes,
                     vta->guest_bytes_addr,
                     vta->chase_into_ok,
                     host_is_bigendian,
                     vta->sigill_diag,
                     vta->arch_guest,
                     &vta->archinfo_guest,
                     &vta->abiinfo_both,
                     guest_word_type,
                     vta->needs_self_check,
                     vta->preamble_function,
                     offB_CMSTART,
                     offB_CMLEN,
                     offB_GUEST_IP,
                     szB_GUEST_IP );

   vexAllocSanityCheck();

   if (irsb == NULL) {
      /* Access failure. */
      vexSetAllocModeTEMP_and_clear();
      vex_traceflags = 0;
      res.status = VexTransAccessFail; return res;
   }

   vassert(vta->guest_extents->n_used >= 1 && vta->guest_extents->n_used <= 3);
   vassert(vta->guest_extents->base[0] == vta->guest_bytes_addr);
   for (i = 0; i < vta->guest_extents->n_used; i++) {
      vassert(vta->guest_extents->len[i] < 10000); /* sanity */
   }

   /* If debugging, show the raw guest bytes for this bb. */
   if (0 || (vex_traceflags & VEX_TRACE_FE)) {
      if (vta->guest_extents->n_used > 1) {
         vex_printf("can't show code due to extents > 1\n");
      } else {
         /* HACK */
         UChar* p = (UChar*)vta->guest_bytes;
         UInt   sum = 0;
         UInt   guest_bytes_read = (UInt)vta->guest_extents->len[0];
         vex_printf("GuestBytes %llx %u ", vta->guest_bytes_addr,
                                           guest_bytes_read );
         for (i = 0; i < guest_bytes_read; i++) {
            UInt b = (UInt)p[i];
            vex_printf(" %02x", b );
            sum = (sum << 1) ^ b;
         }
         vex_printf("  %08x\n\n", sum);
      }
   }

   /* Sanity check the initial IR. */
   sanityCheckIRSB( irsb, "initial IR",
                    False/*can be non-flat*/, guest_word_type );

   vexAllocSanityCheck();

   /* Clean it up, hopefully a lot. */
   irsb = do_iropt_BB ( irsb, specHelper, preciseMemExnsFn,
                              vta->guest_bytes_addr,
                              vta->arch_guest );
   sanityCheckIRSB( irsb, "after initial iropt",
                    True/*must be flat*/, guest_word_type );

   if (vex_traceflags & VEX_TRACE_OPT1) {
      vex_printf("\n------------------------"
                   " After pre-instr IR optimisation "
                   "------------------------\n\n");
      ppIRSB ( irsb );
      vex_printf("\n");
   }

   vexAllocSanityCheck();

   /* Get the thing instrumented. */
   if (vta->instrument1)
      irsb = vta->instrument1(vta->callback_opaque,
                              irsb, guest_layout,
                              vta->guest_extents,
                              &vta->archinfo_host,
                              guest_word_type, host_word_type);
   vexAllocSanityCheck();

   if (vta->instrument2)
      irsb = vta->instrument2(vta->callback_opaque,
                              irsb, guest_layout,
                              vta->guest_extents,
                              &vta->archinfo_host,
                              guest_word_type, host_word_type);

   if (vex_traceflags & VEX_TRACE_INST) {
      vex_printf("\n------------------------"
                   " After instrumentation "
                   "------------------------\n\n");
      ppIRSB ( irsb );
      vex_printf("\n");
   }

   if (vta->instrument1 || vta->instrument2)
      sanityCheckIRSB( irsb, "after instrumentation",
                       True/*must be flat*/, guest_word_type );

   /* Do a post-instrumentation cleanup pass. */
   if (vta->instrument1 || vta->instrument2) {
      do_deadcode_BB( irsb );
      irsb = cprop_BB( irsb );
      do_deadcode_BB( irsb );
      sanityCheckIRSB( irsb, "after post-instrumentation cleanup",
                       True/*must be flat*/, guest_word_type );
   }

   vexAllocSanityCheck();

   if (vex_traceflags & VEX_TRACE_OPT2) {
      vex_printf("\n------------------------"
                   " After post-instr IR optimisation "
                   "------------------------\n\n");
      ppIRSB ( irsb );
      vex_printf("\n");
   }

   /* Turn it into virtual-registerised code.  Build trees -- this
      also throws away any dead bindings. */
   max_ga = ado_treebuild_BB( irsb, preciseMemExnsFn );

   if (vta->finaltidy) {
      irsb = vta->finaltidy(irsb);
   }

   vexAllocSanityCheck();

   if (vex_traceflags & VEX_TRACE_TREES) {
      vex_printf("\n------------------------"
                   "  After tree-building "
                   "------------------------\n\n");
      ppIRSB ( irsb );
      vex_printf("\n");
   }

   /* HACK */
   if (0) {
      *(vta->host_bytes_used) = 0;
      res.status = VexTransOK; return res;
   }
   /* end HACK */

   if (vex_traceflags & VEX_TRACE_VCODE)
      vex_printf("\n------------------------"
                   " Instruction selection "
                   "------------------------\n");

   /* No guest has its IP field at offset zero.  If this fails it
      means some transformation pass somewhere failed to update/copy
      irsb->offsIP properly. */
   vassert(irsb->offsIP >= 16);

   vcode = iselSB ( irsb, vta->arch_host,
                    &vta->archinfo_host,
                    &vta->abiinfo_both,
                    offB_HOST_EvC_COUNTER,
                    offB_HOST_EvC_FAILADDR,
                    chainingAllowed,
                    vta->addProfInc,
                    max_ga );

   vexAllocSanityCheck();

   if (vex_traceflags & VEX_TRACE_VCODE)
      vex_printf("\n");

   if (vex_traceflags & VEX_TRACE_VCODE) {
      for (i = 0; i < vcode->arr_used; i++) {
         vex_printf("%3d   ", i);
         ppInstr(vcode->arr[i], mode64);
         vex_printf("\n");
      }
      vex_printf("\n");
   }

   /* Register allocate. */
   rcode = doRegisterAllocation ( vcode, available_real_regs,
                                  n_available_real_regs,
                                  isMove, getRegUsage, mapRegs,
                                  genSpill, genReload, directReload,
                                  guest_sizeB,
                                  ppInstr, ppReg, mode64 );

   vexAllocSanityCheck();

   if (vex_traceflags & VEX_TRACE_RCODE) {
      vex_printf("\n------------------------"
                   " Register-allocated code "
                   "------------------------\n\n");
      for (i = 0; i < rcode->arr_used; i++) {
         vex_printf("%3d   ", i);
         ppInstr(rcode->arr[i], mode64);
         vex_printf("\n");
      }
      vex_printf("\n");
   }

   /* HACK */
   if (0) {
      *(vta->host_bytes_used) = 0;
      res.status = VexTransOK; return res;
   }
   /* end HACK */

   /* Assemble */
   if (vex_traceflags & VEX_TRACE_ASM) {
      vex_printf("\n------------------------"
                   " Assembly "
                   "------------------------\n\n");
   }

   out_used = 0; /* tracks along the host_bytes array */
   for (i = 0; i < rcode->arr_used; i++) {
      HInstr* hi           = rcode->arr[i];
      Bool    hi_isProfInc = False;
      if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM)) {
         ppInstr(hi, mode64);
         vex_printf("\n");
      }
      j = emit( &hi_isProfInc,
                insn_bytes, sizeof insn_bytes, hi, mode64,
                vta->disp_cp_chain_me_to_slowEP,
                vta->disp_cp_chain_me_to_fastEP,
                vta->disp_cp_xindir,
                vta->disp_cp_xassisted );
      if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM)) {
         for (k = 0; k < j; k++)
            if (insn_bytes[k] < 16)
               vex_printf("0%x ",  (UInt)insn_bytes[k]);
            else
               vex_printf("%x ", (UInt)insn_bytes[k]);
         vex_printf("\n\n");
      }
      if (UNLIKELY(out_used + j > vta->host_bytes_size)) {
         vexSetAllocModeTEMP_and_clear();
         vex_traceflags = 0;
         res.status = VexTransOutputFull;
         return res;
      }
      if (UNLIKELY(hi_isProfInc)) {
         vassert(vta->addProfInc); /* else where did it come from? */
         vassert(res.offs_profInc == -1); /* there can be only one (tm) */
         vassert(out_used >= 0);
         res.offs_profInc = out_used;
      }
      { UChar* dst = &vta->host_bytes[out_used];
        for (k = 0; k < j; k++) {
           dst[k] = insn_bytes[k];
        }
        out_used += j;
      }
      vassert(out_used <= vta->host_bytes_size);
   }
   *(vta->host_bytes_used) = out_used;

   vexAllocSanityCheck();

   vexSetAllocModeTEMP_and_clear();

   if (vex_traceflags) {
      /* Print the expansion ratio for this SB. */
      j = 0; /* total guest bytes */
      for (i = 0; i < vta->guest_extents->n_used; i++) {
         j += vta->guest_extents->len[i];
      }
      if (1) vex_printf("VexExpansionRatio %d %d   %d :10\n\n",
                        j, out_used, (10 * out_used) / (j == 0 ? 1 : j));
   }

   vex_traceflags = 0;
   res.status = VexTransOK;
   return res;
}
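
/* Illustrative sketch of a translation request.  The buffers and the
   my_* callbacks here are hypothetical; real clients (e.g. Valgrind's
   m_translate.c) set many more fields.  Unused callback fields must be
   NULL -- in particular, leaving both chain_me pointers and xindir
   NULL disables chaining:

      VexTranslateArgs  vta;    // zero/NULL all fields first
      VexGuestExtents   vge;
      LibVEX_default_VexArchInfo(&vta.archinfo_guest);
      LibVEX_default_VexArchInfo(&vta.archinfo_host);
      LibVEX_default_VexAbiInfo(&vta.abiinfo_both);
      vta.arch_guest        = VexArchAMD64;
      vta.arch_host         = VexArchAMD64;
      vta.guest_bytes       = guest_code;            // hypothetical
      vta.guest_bytes_addr  = (Addr64)guest_code;
      vta.guest_extents     = &vge;
      vta.host_bytes        = out_buf;               // hypothetical
      vta.host_bytes_size   = sizeof out_buf;
      vta.host_bytes_used   = &out_used;
      vta.needs_self_check  = my_needs_self_check;   // hypothetical
      vta.disp_cp_xassisted = my_xassisted_stub;     // hypothetical

      VexTranslateResult tres = LibVEX_Translate(&vta);
      vassert(tres.status == VexTransOK);
*/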


/* --------- Chain/Unchain XDirects. --------- */

VexInvalRange LibVEX_Chain ( VexArch arch_host,
                             void*   place_to_chain,
                             void*   disp_cp_chain_me_EXPECTED,
                             void*   place_to_jump_to )
{
   VexInvalRange (*chainXDirect)(void*, void*, void*) = NULL;
   switch (arch_host) {
      case VexArchX86:
         chainXDirect = chainXDirect_X86; break;
      case VexArchAMD64:
         chainXDirect = chainXDirect_AMD64; break;
      case VexArchARM:
         chainXDirect = chainXDirect_ARM; break;
      case VexArchARM64:
         chainXDirect = chainXDirect_ARM64; break;
      case VexArchS390X:
         chainXDirect = chainXDirect_S390; break;
      case VexArchPPC32:
         return chainXDirect_PPC(place_to_chain,
                                 disp_cp_chain_me_EXPECTED,
                                 place_to_jump_to, False/*!mode64*/);
      case VexArchPPC64:
         return chainXDirect_PPC(place_to_chain,
                                 disp_cp_chain_me_EXPECTED,
                                 place_to_jump_to, True/*mode64*/);
      case VexArchMIPS32:
         return chainXDirect_MIPS(place_to_chain,
                                  disp_cp_chain_me_EXPECTED,
                                  place_to_jump_to, False/*!mode64*/);
      case VexArchMIPS64:
         return chainXDirect_MIPS(place_to_chain,
                                  disp_cp_chain_me_EXPECTED,
                                  place_to_jump_to, True/*mode64*/);
      default:
         vassert(0);
   }
   vassert(chainXDirect);
   VexInvalRange vir
      = chainXDirect(place_to_chain, disp_cp_chain_me_EXPECTED,
                     place_to_jump_to);
   return vir;
}

VexInvalRange LibVEX_UnChain ( VexArch arch_host,
                               void*   place_to_unchain,
                               void*   place_to_jump_to_EXPECTED,
                               void*   disp_cp_chain_me )
{
   VexInvalRange (*unchainXDirect)(void*, void*, void*) = NULL;
   switch (arch_host) {
      case VexArchX86:
         unchainXDirect = unchainXDirect_X86; break;
      case VexArchAMD64:
         unchainXDirect = unchainXDirect_AMD64; break;
      case VexArchARM:
         unchainXDirect = unchainXDirect_ARM; break;
      case VexArchARM64:
         unchainXDirect = unchainXDirect_ARM64; break;
      case VexArchS390X:
         unchainXDirect = unchainXDirect_S390; break;
      case VexArchPPC32:
         return unchainXDirect_PPC(place_to_unchain,
                                   place_to_jump_to_EXPECTED,
                                   disp_cp_chain_me, False/*!mode64*/);
      case VexArchPPC64:
         return unchainXDirect_PPC(place_to_unchain,
                                   place_to_jump_to_EXPECTED,
                                   disp_cp_chain_me, True/*mode64*/);
      case VexArchMIPS32:
         return unchainXDirect_MIPS(place_to_unchain,
                                    place_to_jump_to_EXPECTED,
                                    disp_cp_chain_me, False/*!mode64*/);
      case VexArchMIPS64:
         return unchainXDirect_MIPS(place_to_unchain,
                                    place_to_jump_to_EXPECTED,
                                    disp_cp_chain_me, True/*mode64*/);
      default:
         vassert(0);
   }
   vassert(unchainXDirect);
   VexInvalRange vir
      = unchainXDirect(place_to_unchain, place_to_jump_to_EXPECTED,
                       disp_cp_chain_me);
   return vir;
}

Int LibVEX_evCheckSzB ( VexArch arch_host )
{
   static Int cached = 0; /* DO NOT MAKE NON-STATIC */
   if (UNLIKELY(cached == 0)) {
      switch (arch_host) {
         case VexArchX86:
            cached = evCheckSzB_X86(); break;
         case VexArchAMD64:
            cached = evCheckSzB_AMD64(); break;
         case VexArchARM:
            cached = evCheckSzB_ARM(); break;
         case VexArchARM64:
            cached = evCheckSzB_ARM64(); break;
         case VexArchS390X:
            cached = evCheckSzB_S390(); break;
         case VexArchPPC32:
         case VexArchPPC64:
            cached = evCheckSzB_PPC(); break;
         case VexArchMIPS32:
         case VexArchMIPS64:
            cached = evCheckSzB_MIPS(); break;
         default:
            vassert(0);
      }
   }
   return cached;
}

VexInvalRange LibVEX_PatchProfInc ( VexArch arch_host,
                                    void*   place_to_patch,
                                    ULong*  location_of_counter )
{
   VexInvalRange (*patchProfInc)(void*,ULong*) = NULL;
   switch (arch_host) {
      case VexArchX86:
         patchProfInc = patchProfInc_X86; break;
      case VexArchAMD64:
         patchProfInc = patchProfInc_AMD64; break;
      case VexArchARM:
         patchProfInc = patchProfInc_ARM; break;
      case VexArchS390X:
         patchProfInc = patchProfInc_S390; break;
      case VexArchPPC32:
         return patchProfInc_PPC(place_to_patch,
                                 location_of_counter, False/*!mode64*/);
      case VexArchPPC64:
         return patchProfInc_PPC(place_to_patch,
                                 location_of_counter, True/*mode64*/);
      case VexArchMIPS32:
         return patchProfInc_MIPS(place_to_patch,
                                  location_of_counter, False/*!mode64*/);
      case VexArchMIPS64:
         return patchProfInc_MIPS(place_to_patch,
                                  location_of_counter, True/*mode64*/);
      default:
         vassert(0);
   }
   vassert(patchProfInc);
   VexInvalRange vir
      = patchProfInc(place_to_patch, location_of_counter);
   return vir;
}


/* --------- Emulation warnings. --------- */

const HChar* LibVEX_EmNote_string ( VexEmNote ew )
{
   switch (ew) {
     case EmNote_NONE:
        return "none";
     case EmWarn_X86_x87exns:
        return "Unmasking x87 FP exceptions";
     case EmWarn_X86_x87precision:
        return "Selection of non-80-bit x87 FP precision";
     case EmWarn_X86_sseExns:
        return "Unmasking SSE FP exceptions";
     case EmWarn_X86_fz:
        return "Setting %mxcsr.fz (SSE flush-underflows-to-zero mode)";
     case EmWarn_X86_daz:
        return "Setting %mxcsr.daz (SSE treat-denormals-as-zero mode)";
     case EmWarn_X86_acFlag:
        return "Setting %eflags.ac (setting noted but ignored)";
     case EmWarn_PPCexns:
        return "Unmasking PPC32/64 FP exceptions";
     case EmWarn_PPC64_redir_overflow:
        return "PPC64 function redirection stack overflow";
     case EmWarn_PPC64_redir_underflow:
        return "PPC64 function redirection stack underflow";
     case EmWarn_S390X_fpext_rounding:
        return "The specified rounding mode cannot be supported. That\n"
               "  feature requires the floating point extension facility,\n"
               "  which is not available on this host. Continuing using\n"
               "  the rounding mode from FPC. Results may differ!";
     case EmWarn_S390X_invalid_rounding:
        return "The specified rounding mode is invalid.\n"
               "  Continuing using 'round to nearest'. Results may differ!";
     case EmFail_S390X_stfle:
        return "Instruction stfle is not supported on this host";
     case EmFail_S390X_stckf:
        return "Instruction stckf is not supported on this host";
     case EmFail_S390X_ecag:
        return "Instruction ecag is not supported on this host";
     case EmFail_S390X_fpext:
        return "Encountered an instruction that requires the floating "
               "point extension facility.\n"
               "  That facility is not available on this host";
     case EmFail_S390X_invalid_PFPO_rounding_mode:
        return "The rounding mode specified in GPR 0 for PFPO instruction"
               " is invalid";
     case EmFail_S390X_invalid_PFPO_function:
        return "The function code specified in GPR 0 for PFPO instruction"
               " is invalid";
     default:
        vpanic("LibVEX_EmNote_string: unknown warning");
   }
}

/* ------------------ Arch/HwCaps stuff. ------------------ */

const HChar* LibVEX_ppVexArch ( VexArch arch )
{
   switch (arch) {
      case VexArch_INVALID: return "INVALID";
      case VexArchX86:      return "X86";
      case VexArchAMD64:    return "AMD64";
      case VexArchARM:      return "ARM";
      case VexArchARM64:    return "ARM64";
      case VexArchPPC32:    return "PPC32";
      case VexArchPPC64:    return "PPC64";
      case VexArchS390X:    return "S390X";
      case VexArchMIPS32:   return "MIPS32";
      case VexArchMIPS64:   return "MIPS64";
      default:              return "VexArch???";
   }
}

const HChar* LibVEX_ppVexHwCaps ( VexArch arch, UInt hwcaps )
{
   const HChar* str = show_hwcaps(arch,hwcaps);
   return str ? str : "INVALID";
}


/* Write default settings into *vai. */
void LibVEX_default_VexArchInfo ( /*OUT*/VexArchInfo* vai )
{
   vex_bzero(vai, sizeof(*vai));
   vai->hwcaps              = 0;
   vai->ppc_icache_line_szB = 0;
   vai->ppc_dcbz_szB        = 0;
   vai->ppc_dcbzl_szB       = 0;
   vai->arm64_dMinLine_lg2_szB  = 0;
   vai->arm64_iMinLine_lg2_szB  = 0;
   vai->hwcache_info.num_levels = 0;
   vai->hwcache_info.num_caches = 0;
   vai->hwcache_info.caches     = NULL;
   vai->hwcache_info.icaches_maintain_coherence = True;  // whatever
}

/* Write default settings into *vbi. */
void LibVEX_default_VexAbiInfo ( /*OUT*/VexAbiInfo* vbi )
{
   vex_bzero(vbi, sizeof(*vbi));
   vbi->guest_stack_redzone_size       = 0;
   vbi->guest_amd64_assume_fs_is_zero  = False;
   vbi->guest_amd64_assume_gs_is_0x60  = False;
   vbi->guest_ppc_zap_RZ_at_blr        = False;
   vbi->guest_ppc_zap_RZ_at_bl         = NULL;
   vbi->guest_ppc_sc_continues_at_LR   = False;
   vbi->host_ppc_calls_use_fndescrs    = False;
   vbi->host_ppc32_regalign_int64_args = False;
}
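
/* For illustration: clients typically take the defaults and then
   override the fields their platform needs.  Values here are examples
   only (128 is the amd64 ELF red zone size):

      VexAbiInfo vbi;
      LibVEX_default_VexAbiInfo(&vbi);
      vbi.guest_stack_redzone_size      = 128;
      vbi.guest_amd64_assume_fs_is_zero = True;   // Linux-style %fs use
*/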
1255
1256
1257/* Return a string showing the hwcaps in a nice way.  The string will
1258   be NULL for invalid combinations of flags, so these functions also
1259   serve as a way to validate hwcaps values. */
1260
1261static const HChar* show_hwcaps_x86 ( UInt hwcaps )
1262{
1263   /* Monotonic, LZCNT > SSE3 > SSE2 > SSE1 > MMXEXT > baseline. */
1264   switch (hwcaps) {
1265      case 0:
1266         return "x86-sse0";
1267      case VEX_HWCAPS_X86_MMXEXT:
1268         return "x86-mmxext";
1269      case VEX_HWCAPS_X86_MMXEXT | VEX_HWCAPS_X86_SSE1:
1270         return "x86-mmxext-sse1";
1271      case VEX_HWCAPS_X86_MMXEXT | VEX_HWCAPS_X86_SSE1 | VEX_HWCAPS_X86_SSE2:
1272         return "x86-mmxext-sse1-sse2";
1273      case VEX_HWCAPS_X86_MMXEXT | VEX_HWCAPS_X86_SSE1 | VEX_HWCAPS_X86_SSE2
1274           | VEX_HWCAPS_X86_LZCNT:
1275         return "x86-mmxext-sse1-sse2-lzcnt";
1276      case VEX_HWCAPS_X86_MMXEXT | VEX_HWCAPS_X86_SSE1 | VEX_HWCAPS_X86_SSE2
1277           | VEX_HWCAPS_X86_SSE3:
1278         return "x86-mmxext-sse1-sse2-sse3";
1279      case VEX_HWCAPS_X86_MMXEXT | VEX_HWCAPS_X86_SSE1 | VEX_HWCAPS_X86_SSE2
1280           | VEX_HWCAPS_X86_SSE3 | VEX_HWCAPS_X86_LZCNT:
1281         return "x86-mmxext-sse1-sse2-sse3-lzcnt";
1282      default:
1283         return NULL;
1284   }
1285}

static const HChar* show_hwcaps_amd64 ( UInt hwcaps )
{
   /* SSE3 and CX16 are orthogonal and > baseline, although we really
      don't expect to come across anything which can do SSE3 but can't
      do CX16.  Still, we can handle that case.  LZCNT is similarly
      orthogonal. */

   /* Throw out obviously stupid cases: */
   Bool have_sse3 = (hwcaps & VEX_HWCAPS_AMD64_SSE3) != 0;
   Bool have_avx  = (hwcaps & VEX_HWCAPS_AMD64_AVX)  != 0;
   Bool have_bmi  = (hwcaps & VEX_HWCAPS_AMD64_BMI)  != 0;
   Bool have_avx2 = (hwcaps & VEX_HWCAPS_AMD64_AVX2) != 0;
   /* AVX without SSE3 */
   if (have_avx && !have_sse3)
      return NULL;
   /* AVX2 or BMI without AVX */
   if ((have_avx2 || have_bmi) && !have_avx)
      return NULL;

   /* This isn't threadsafe: concurrent callers race on the static
      buffer.  It is rebuilt on every call, though, so a single-
      threaded caller always gets a string matching its hwcaps
      argument; caching the first result would return a stale string
      when called again with different hwcaps. */
   static HChar buf[100];

   vex_bzero(buf, sizeof(buf));

   HChar* p = &buf[0];

   p = p + vex_sprintf(p, "%s", "amd64");
   if (hwcaps == 0) {
      /* special-case the baseline case */
      p = p + vex_sprintf(p, "%s", "-sse2");
      goto out;
   }
   if (hwcaps & VEX_HWCAPS_AMD64_CX16) {
      p = p + vex_sprintf(p, "%s", "-cx16");
   }
   if (hwcaps & VEX_HWCAPS_AMD64_LZCNT) {
      p = p + vex_sprintf(p, "%s", "-lzcnt");
   }
   if (hwcaps & VEX_HWCAPS_AMD64_RDTSCP) {
      p = p + vex_sprintf(p, "%s", "-rdtscp");
   }
   if (hwcaps & VEX_HWCAPS_AMD64_SSE3) {
      p = p + vex_sprintf(p, "%s", "-sse3");
   }
   if (hwcaps & VEX_HWCAPS_AMD64_AVX) {
      p = p + vex_sprintf(p, "%s", "-avx");
   }
   if (hwcaps & VEX_HWCAPS_AMD64_AVX2) {
      p = p + vex_sprintf(p, "%s", "-avx2");
   }
   if (hwcaps & VEX_HWCAPS_AMD64_BMI) {
      p = p + vex_sprintf(p, "%s", "-bmi");
   }

  out:
   vassert(buf[sizeof(buf)-1] == 0);
   return buf;
}
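
/* For example, following the append order above, hwcaps of
   VEX_HWCAPS_AMD64_CX16 | VEX_HWCAPS_AMD64_SSE3 | VEX_HWCAPS_AMD64_AVX
   produces "amd64-cx16-sse3-avx". */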

static const HChar* show_hwcaps_ppc32 ( UInt hwcaps )
{
   /* Monotonic with complications.  Basically V > F > baseline,
      but once you have F then you can have FX or GX too. */
   const UInt F  = VEX_HWCAPS_PPC32_F;
   const UInt V  = VEX_HWCAPS_PPC32_V;
   const UInt FX = VEX_HWCAPS_PPC32_FX;
   const UInt GX = VEX_HWCAPS_PPC32_GX;
   const UInt VX = VEX_HWCAPS_PPC32_VX;
   const UInt DFP = VEX_HWCAPS_PPC32_DFP;
   const UInt ISA2_07 = VEX_HWCAPS_PPC32_ISA2_07;
         UInt c  = hwcaps;
   if (c == 0)           return "ppc32-int";
   if (c == F)           return "ppc32-int-flt";
   if (c == (F|FX))      return "ppc32-int-flt-FX";
   if (c == (F|GX))      return "ppc32-int-flt-GX";
   if (c == (F|FX|GX))   return "ppc32-int-flt-FX-GX";
   if (c == (F|V))       return "ppc32-int-flt-vmx";
   if (c == (F|V|FX))    return "ppc32-int-flt-vmx-FX";
   if (c == (F|V|GX))    return "ppc32-int-flt-vmx-GX";
   if (c == (F|V|FX|GX)) return "ppc32-int-flt-vmx-FX-GX";
   if (c == (F|V|FX|GX|DFP))    return "ppc32-int-flt-vmx-FX-GX-DFP";
   if (c == (F|V|FX|GX|VX|DFP)) return "ppc32-int-flt-vmx-FX-GX-VX-DFP";
   if (c == (F|V|FX|GX|VX|DFP|ISA2_07))
      return "ppc32-int-flt-vmx-FX-GX-VX-DFP-ISA2_07";

   return NULL;
}

static const HChar* show_hwcaps_ppc64 ( UInt hwcaps )
{
   /* Monotonic with complications.  Basically V > baseline(==F),
      but once you have F then you can have FX or GX too. */
   const UInt V  = VEX_HWCAPS_PPC64_V;
   const UInt FX = VEX_HWCAPS_PPC64_FX;
   const UInt GX = VEX_HWCAPS_PPC64_GX;
   const UInt VX = VEX_HWCAPS_PPC64_VX;
   const UInt DFP = VEX_HWCAPS_PPC64_DFP;
   const UInt ISA2_07 = VEX_HWCAPS_PPC64_ISA2_07;
         UInt c  = hwcaps;
   if (c == 0)         return "ppc64-int-flt";
   if (c == FX)        return "ppc64-int-flt-FX";
   if (c == GX)        return "ppc64-int-flt-GX";
   if (c == (FX|GX))   return "ppc64-int-flt-FX-GX";
   if (c == V)         return "ppc64-int-flt-vmx";
   if (c == (V|FX))    return "ppc64-int-flt-vmx-FX";
   if (c == (V|GX))    return "ppc64-int-flt-vmx-GX";
   if (c == (V|FX|GX)) return "ppc64-int-flt-vmx-FX-GX";
   if (c == (V|FX|GX|DFP))    return "ppc64-int-flt-vmx-FX-GX-DFP";
   if (c == (V|FX|GX|VX|DFP)) return "ppc64-int-flt-vmx-FX-GX-VX-DFP";
   if (c == (V|FX|GX|VX|DFP|ISA2_07))
      return "ppc64-int-flt-vmx-FX-GX-VX-DFP-ISA2_07";
   return NULL;
}

static const HChar* show_hwcaps_arm ( UInt hwcaps )
{
   Bool N = ((hwcaps & VEX_HWCAPS_ARM_NEON) != 0);
   Bool vfp = ((hwcaps & (VEX_HWCAPS_ARM_VFP |
               VEX_HWCAPS_ARM_VFP2 | VEX_HWCAPS_ARM_VFP3)) != 0);
   switch (VEX_ARM_ARCHLEVEL(hwcaps)) {
      case 5:
         if (N)
            return NULL;
         if (vfp)
            return "ARMv5-vfp";
         else
            return "ARMv5";
      case 6:
         if (N)
            return NULL;
         if (vfp)
            return "ARMv6-vfp";
         else
            return "ARMv6";
      case 7:
         if (vfp) {
            if (N)
               return "ARMv7-vfp-neon";
            else
               return "ARMv7-vfp";
         } else {
            if (N)
               return "ARMv7-neon";
            else
               return "ARMv7";
         }
      default:
         return NULL;
   }
}

static const HChar* show_hwcaps_arm64 ( UInt hwcaps )
{
   /* Since there are no variants, just insist that hwcaps is zero,
      and declare it invalid otherwise. */
   if (hwcaps == 0)
      return "baseline";
   return NULL;
}

static const HChar* show_hwcaps_s390x ( UInt hwcaps )
{
   static const HChar prefix[] = "s390x";
   static const struct {
      UInt  hwcaps_bit;
      HChar name[6];
   } hwcaps_list[] = {
      { VEX_HWCAPS_S390X_LDISP, "ldisp" },
      { VEX_HWCAPS_S390X_EIMM,  "eimm" },
      { VEX_HWCAPS_S390X_GIE,   "gie" },
      { VEX_HWCAPS_S390X_DFP,   "dfp" },
      { VEX_HWCAPS_S390X_FGX,   "fgx" },
      { VEX_HWCAPS_S390X_STFLE, "stfle" },
      { VEX_HWCAPS_S390X_ETF2,  "etf2" },
      { VEX_HWCAPS_S390X_ETF3,  "etf3" },
      { VEX_HWCAPS_S390X_STCKF, "stckf" },
      { VEX_HWCAPS_S390X_FPEXT, "fpext" },
      { VEX_HWCAPS_S390X_LSC,   "lsc" },
      { VEX_HWCAPS_S390X_PFPO,  "pfpo" },
   };
#define NUM_HWCAPS (sizeof hwcaps_list / sizeof hwcaps_list[0])
   static HChar buf[sizeof prefix +
                    NUM_HWCAPS * (sizeof hwcaps_list[0].name + 1) +
                    1];  // '\0'
   HChar *p;
   UInt i;

   /* Not threadsafe (concurrent callers race on the static buffer),
      but the buffer is rebuilt on every call, so the result always
      matches the hwcaps argument. */
   hwcaps = VEX_HWCAPS_S390X(hwcaps);

   p = buf + vex_sprintf(buf, "%s", prefix);
   for (i = 0 ; i < NUM_HWCAPS; ++i) {
      if (hwcaps & hwcaps_list[i].hwcaps_bit)
         p = p + vex_sprintf(p, "-%s", hwcaps_list[i].name);
   }
#undef NUM_HWCAPS

   /* If there are no facilities, add "zarch" */
   if (hwcaps == 0)
      vex_sprintf(p, "-%s", "zarch");

   return buf;
}

static const HChar* show_hwcaps_mips32 ( UInt hwcaps )
{
   /* MIPS baseline. */
   if (VEX_MIPS_COMP_ID(hwcaps) == VEX_PRID_COMP_MIPS) {
      /* MIPS baseline with dspr2. */
      if (VEX_MIPS_PROC_DSP2(hwcaps)) {
         return "MIPS-baseline-dspr2";
      }
      /* MIPS baseline with dsp. */
      if (VEX_MIPS_PROC_DSP(hwcaps)) {
         return "MIPS-baseline-dsp";
      }
      return "MIPS-baseline";
   }

   /* Broadcom baseline. */
   if (VEX_MIPS_COMP_ID(hwcaps) == VEX_PRID_COMP_BROADCOM) {
      return "Broadcom-baseline";
   }

   /* Netlogic baseline. */
   if (VEX_MIPS_COMP_ID(hwcaps) == VEX_PRID_COMP_NETLOGIC) {
      return "Netlogic-baseline";
   }

   /* Cavium baseline. */
   if (VEX_MIPS_COMP_ID(hwcaps) == VEX_PRID_COMP_CAVIUM) {
      return "Cavium-baseline";
   }

   return NULL;
}

static const HChar* show_hwcaps_mips64 ( UInt hwcaps )
{
   /* Baseline is the only variant we distinguish, so every hwcaps
      value is accepted. */
   return "mips64-baseline";
}

/* ---- */
static const HChar* show_hwcaps ( VexArch arch, UInt hwcaps )
{
   switch (arch) {
      case VexArchX86:    return show_hwcaps_x86(hwcaps);
      case VexArchAMD64:  return show_hwcaps_amd64(hwcaps);
      case VexArchPPC32:  return show_hwcaps_ppc32(hwcaps);
      case VexArchPPC64:  return show_hwcaps_ppc64(hwcaps);
      case VexArchARM:    return show_hwcaps_arm(hwcaps);
      case VexArchARM64:  return show_hwcaps_arm64(hwcaps);
      case VexArchS390X:  return show_hwcaps_s390x(hwcaps);
      case VexArchMIPS32: return show_hwcaps_mips32(hwcaps);
      case VexArchMIPS64: return show_hwcaps_mips64(hwcaps);
      default: return NULL;
   }
}

static Bool are_valid_hwcaps ( VexArch arch, UInt hwcaps )
{
   return show_hwcaps(arch,hwcaps) != NULL;
}
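
/* Illustrative check (hypothetical caller, since are_valid_hwcaps is
   static to this file): a front end can reject a bad capability set
   before attempting translation:

      if (!are_valid_hwcaps(VexArchAMD64, hwcaps))
         vpanic("LibVEX: unsupported hwcaps");
*/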


/*---------------------------------------------------------------*/
/*--- end                                         main_main.c ---*/
/*---------------------------------------------------------------*/