
/*---------------------------------------------------------------*/
/*--- begin                               guest_arm_helpers.c ---*/
/*---------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2004-2013 OpenWorks LLP
      info@open-works.net

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
   02110-1301, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "libvex_basictypes.h"
#include "libvex_emnote.h"
#include "libvex_guest_arm.h"
#include "libvex_ir.h"
#include "libvex.h"

#include "main_util.h"
#include "main_globals.h"
#include "guest_generic_bb_to_IR.h"
#include "guest_arm_defs.h"


/* This file contains helper functions for arm guest code.  Calls to
   these functions are generated by the back end.  These calls are of
   course in the host machine code and this file will be compiled to
   host machine code, so that all makes sense.

   Only change the signatures of these helper functions very
   carefully.  If you change the signature here, you'll have to change
   the parameters passed to it in the IR calls constructed by
   guest-arm/toIR.c.
*/

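/* Worked example of the thunk scheme (illustrative; the authoritative
   description of the thunk fields is the detailed comment in
   guest_arm_defs.h): after an ARM "SUBS Rd, Rn, Rm" the front end
   leaves
      CC_OP   = ARMG_CC_OP_SUB
      CC_DEP1 = Rn   (argL)
      CC_DEP2 = Rm   (argR)
      CC_NDEP = unused for SUB
   and a subsequent conditional instruction is resolved by calling
   armg_calculate_condition((cond << 4) | CC_OP, CC_DEP1, CC_DEP2,
   CC_NDEP), defined below. */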

/* Set to 1 to get detailed profiling info about individual N, Z, C
   and V flag evaluation. */
#define PROFILE_NZCV_FLAGS 0

#if PROFILE_NZCV_FLAGS

static UInt tab_n_eval[ARMG_CC_OP_NUMBER];
static UInt tab_z_eval[ARMG_CC_OP_NUMBER];
static UInt tab_c_eval[ARMG_CC_OP_NUMBER];
static UInt tab_v_eval[ARMG_CC_OP_NUMBER];
static UInt initted = 0;
static UInt tot_evals = 0;

static void initCounts ( void )
{
   UInt i;
   for (i = 0; i < ARMG_CC_OP_NUMBER; i++) {
      tab_n_eval[i] = tab_z_eval[i] = tab_c_eval[i] = tab_v_eval[i] = 0;
   }
   initted = 1;
}

static void showCounts ( void )
{
   UInt i;
   vex_printf("\n                 N          Z          C          V\n");
   vex_printf(  "---------------------------------------------------\n");
   for (i = 0; i < ARMG_CC_OP_NUMBER; i++) {
      vex_printf("CC_OP=%d  %9d  %9d  %9d  %9d\n",
                 i,
                 tab_n_eval[i], tab_z_eval[i],
                 tab_c_eval[i], tab_v_eval[i] );
   }
}

#define NOTE_N_EVAL(_cc_op) NOTE_EVAL(_cc_op, tab_n_eval)
#define NOTE_Z_EVAL(_cc_op) NOTE_EVAL(_cc_op, tab_z_eval)
#define NOTE_C_EVAL(_cc_op) NOTE_EVAL(_cc_op, tab_c_eval)
#define NOTE_V_EVAL(_cc_op) NOTE_EVAL(_cc_op, tab_v_eval)

#define NOTE_EVAL(_cc_op, _tab) \
   do { \
      if (!initted) initCounts(); \
      vassert( ((UInt)(_cc_op)) < ARMG_CC_OP_NUMBER); \
      _tab[(UInt)(_cc_op)]++; \
      tot_evals++; \
      if (0 == (tot_evals & 0xFFFFF)) \
        showCounts(); \
   } while (0)

#endif /* PROFILE_NZCV_FLAGS */


/* Calculate the N flag from the supplied thunk components, in the
   least significant bit of the word.  Returned bits 31:1 are zero. */
static
UInt armg_calculate_flag_n ( UInt cc_op, UInt cc_dep1,
                             UInt cc_dep2, UInt cc_dep3 )
{
#  if PROFILE_NZCV_FLAGS
   NOTE_N_EVAL(cc_op);
#  endif

   switch (cc_op) {
      case ARMG_CC_OP_COPY: {
         /* (nzcv:28x0, unused, unused) */
         UInt nf   = (cc_dep1 >> ARMG_CC_SHIFT_N) & 1;
         return nf;
      }
      case ARMG_CC_OP_ADD: {
         /* (argL, argR, unused) */
         UInt argL = cc_dep1;
         UInt argR = cc_dep2;
         UInt res  = argL + argR;
         UInt nf   = res >> 31;
         return nf;
      }
      case ARMG_CC_OP_SUB: {
         /* (argL, argR, unused) */
         UInt argL = cc_dep1;
         UInt argR = cc_dep2;
         UInt res  = argL - argR;
         UInt nf   = res >> 31;
         return nf;
      }
      case ARMG_CC_OP_ADC: {
         /* (argL, argR, oldC) */
         UInt argL = cc_dep1;
         UInt argR = cc_dep2;
         UInt oldC = cc_dep3;
         vassert((oldC & ~1) == 0);
         UInt res  = argL + argR + oldC;
         UInt nf   = res >> 31;
         return nf;
      }
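      /* Note on SBB (subtract with carry): ARM treats the C flag as
         an inverted borrow, C=1 meaning "no borrow".  The
         architectural result is argL - argR - NOT(C), hence the
         "(oldC ^ 1)" term in the SBB cases below. */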
      case ARMG_CC_OP_SBB: {
         /* (argL, argR, oldC) */
         UInt argL = cc_dep1;
         UInt argR = cc_dep2;
         UInt oldC = cc_dep3;
         vassert((oldC & ~1) == 0);
         UInt res  = argL - argR - (oldC ^ 1);
         UInt nf   = res >> 31;
         return nf;
      }
      case ARMG_CC_OP_LOGIC: {
         /* (res, shco, oldV) */
         UInt res  = cc_dep1;
         UInt nf   = res >> 31;
         return nf;
      }
      case ARMG_CC_OP_MUL: {
         /* (res, unused, oldC:oldV) */
         UInt res  = cc_dep1;
         UInt nf   = res >> 31;
         return nf;
      }
      case ARMG_CC_OP_MULL: {
         /* (resLo32, resHi32, oldC:oldV) */
         UInt resHi32 = cc_dep2;
         UInt nf      = resHi32 >> 31;
         return nf;
      }
      default:
         /* shouldn't really make these calls from generated code */
         vex_printf("armg_calculate_flag_n"
                    "( op=%u, dep1=0x%x, dep2=0x%x, dep3=0x%x )\n",
                    cc_op, cc_dep1, cc_dep2, cc_dep3 );
         vpanic("armg_calculate_flag_n");
   }
}


/* Calculate the Z flag from the supplied thunk components, in the
   least significant bit of the word.  Returned bits 31:1 are zero. */
static
UInt armg_calculate_flag_z ( UInt cc_op, UInt cc_dep1,
                             UInt cc_dep2, UInt cc_dep3 )
{
#  if PROFILE_NZCV_FLAGS
   NOTE_Z_EVAL(cc_op);
#  endif

   switch (cc_op) {
      case ARMG_CC_OP_COPY: {
         /* (nzcv:28x0, unused, unused) */
         UInt zf   = (cc_dep1 >> ARMG_CC_SHIFT_Z) & 1;
         return zf;
      }
      case ARMG_CC_OP_ADD: {
         /* (argL, argR, unused) */
         UInt argL = cc_dep1;
         UInt argR = cc_dep2;
         UInt res  = argL + argR;
         UInt zf   = res == 0;
         return zf;
      }
      case ARMG_CC_OP_SUB: {
         /* (argL, argR, unused) */
         UInt argL = cc_dep1;
         UInt argR = cc_dep2;
         UInt res  = argL - argR;
         UInt zf   = res == 0;
         return zf;
      }
      case ARMG_CC_OP_ADC: {
         /* (argL, argR, oldC) */
         UInt argL = cc_dep1;
         UInt argR = cc_dep2;
         UInt oldC = cc_dep3;
         vassert((oldC & ~1) == 0);
         UInt res  = argL + argR + oldC;
         UInt zf   = res == 0;
         return zf;
      }
      case ARMG_CC_OP_SBB: {
         /* (argL, argR, oldC) */
         UInt argL = cc_dep1;
         UInt argR = cc_dep2;
         UInt oldC = cc_dep3;
         vassert((oldC & ~1) == 0);
         UInt res  = argL - argR - (oldC ^ 1);
         UInt zf   = res == 0;
         return zf;
      }
      case ARMG_CC_OP_LOGIC: {
         /* (res, shco, oldV) */
         UInt res  = cc_dep1;
         UInt zf   = res == 0;
         return zf;
      }
      case ARMG_CC_OP_MUL: {
         /* (res, unused, oldC:oldV) */
         UInt res  = cc_dep1;
         UInt zf   = res == 0;
         return zf;
      }
      case ARMG_CC_OP_MULL: {
         /* (resLo32, resHi32, oldC:oldV) */
         UInt resLo32 = cc_dep1;
         UInt resHi32 = cc_dep2;
         UInt zf      = (resHi32|resLo32) == 0;
         return zf;
      }
      default:
         /* shouldn't really make these calls from generated code */
         vex_printf("armg_calculate_flag_z"
                    "( op=%u, dep1=0x%x, dep2=0x%x, dep3=0x%x )\n",
                    cc_op, cc_dep1, cc_dep2, cc_dep3 );
         vpanic("armg_calculate_flag_z");
   }
}


/* CALLED FROM GENERATED CODE: CLEAN HELPER */
/* Calculate the C flag from the supplied thunk components, in the
   least significant bit of the word.  Returned bits 31:1 are zero. */
UInt armg_calculate_flag_c ( UInt cc_op, UInt cc_dep1,
                             UInt cc_dep2, UInt cc_dep3 )
{
#  if PROFILE_NZCV_FLAGS
   NOTE_C_EVAL(cc_op);
#  endif

   switch (cc_op) {
      case ARMG_CC_OP_COPY: {
         /* (nzcv:28x0, unused, unused) */
         UInt cf   = (cc_dep1 >> ARMG_CC_SHIFT_C) & 1;
         return cf;
      }
      case ARMG_CC_OP_ADD: {
         /* (argL, argR, unused) */
         UInt argL = cc_dep1;
         UInt argR = cc_dep2;
         UInt res  = argL + argR;
         UInt cf   = res < argL;
         return cf;
      }
      case ARMG_CC_OP_SUB: {
         /* (argL, argR, unused) */
         UInt argL = cc_dep1;
         UInt argR = cc_dep2;
         UInt cf   = argL >= argR;
         return cf;
      }
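      /* For ADC the carry-out test needs both comparisons: with
         carry-in 1, argL + argR + 1 can wrap around to exactly argL
         (when argR == 0xFFFFFFFF), so "res <= argL" detects the
         carry; with carry-in 0 the plain "res < argL" test used for
         ADD suffices. */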
      case ARMG_CC_OP_ADC: {
         /* (argL, argR, oldC) */
         UInt argL = cc_dep1;
         UInt argR = cc_dep2;
         UInt oldC = cc_dep3;
         vassert((oldC & ~1) == 0);
         UInt res  = argL + argR + oldC;
         UInt cf   = oldC ? (res <= argL) : (res < argL);
         return cf;
      }
      case ARMG_CC_OP_SBB: {
         /* (argL, argR, oldC) */
         UInt argL = cc_dep1;
         UInt argR = cc_dep2;
         UInt oldC = cc_dep3;
         vassert((oldC & ~1) == 0);
         UInt cf   = oldC ? (argL >= argR) : (argL > argR);
         return cf;
      }
      case ARMG_CC_OP_LOGIC: {
         /* (res, shco, oldV) */
         UInt shco = cc_dep2;
         vassert((shco & ~1) == 0);
         UInt cf   = shco;
         return cf;
      }
      case ARMG_CC_OP_MUL: {
         /* (res, unused, oldC:oldV) */
         UInt oldC = (cc_dep3 >> 1) & 1;
         vassert((cc_dep3 & ~3) == 0);
         UInt cf   = oldC;
         return cf;
      }
      case ARMG_CC_OP_MULL: {
         /* (resLo32, resHi32, oldC:oldV) */
         UInt oldC    = (cc_dep3 >> 1) & 1;
         vassert((cc_dep3 & ~3) == 0);
         UInt cf      = oldC;
         return cf;
      }
      default:
         /* shouldn't really make these calls from generated code */
         vex_printf("armg_calculate_flag_c"
                    "( op=%u, dep1=0x%x, dep2=0x%x, dep3=0x%x )\n",
                    cc_op, cc_dep1, cc_dep2, cc_dep3 );
         vpanic("armg_calculate_flag_c");
   }
}


/* CALLED FROM GENERATED CODE: CLEAN HELPER */
/* Calculate the V flag from the supplied thunk components, in the
   least significant bit of the word.  Returned bits 31:1 are zero. */
UInt armg_calculate_flag_v ( UInt cc_op, UInt cc_dep1,
                             UInt cc_dep2, UInt cc_dep3 )
{
#  if PROFILE_NZCV_FLAGS
   NOTE_V_EVAL(cc_op);
#  endif

   switch (cc_op) {
      case ARMG_CC_OP_COPY: {
         /* (nzcv:28x0, unused, unused) */
         UInt vf   = (cc_dep1 >> ARMG_CC_SHIFT_V) & 1;
         return vf;
      }
      case ARMG_CC_OP_ADD: {
         /* (argL, argR, unused) */
         UInt argL = cc_dep1;
         UInt argR = cc_dep2;
         UInt res  = argL + argR;
         UInt vf   = ((res ^ argL) & (res ^ argR)) >> 31;
         return vf;
      }
      case ARMG_CC_OP_SUB: {
         /* (argL, argR, unused) */
         UInt argL = cc_dep1;
         UInt argR = cc_dep2;
         UInt res  = argL - argR;
         UInt vf   = ((argL ^ argR) & (argL ^ res)) >> 31;
         return vf;
      }
      case ARMG_CC_OP_ADC: {
         /* (argL, argR, oldC) */
         UInt argL = cc_dep1;
         UInt argR = cc_dep2;
         UInt oldC = cc_dep3;
         vassert((oldC & ~1) == 0);
         UInt res  = argL + argR + oldC;
         UInt vf   = ((res ^ argL) & (res ^ argR)) >> 31;
         return vf;
      }
      case ARMG_CC_OP_SBB: {
         /* (argL, argR, oldC) */
         UInt argL = cc_dep1;
         UInt argR = cc_dep2;
         UInt oldC = cc_dep3;
         vassert((oldC & ~1) == 0);
         UInt res  = argL - argR - (oldC ^ 1);
         UInt vf   = ((argL ^ argR) & (argL ^ res)) >> 31;
         return vf;
      }
      case ARMG_CC_OP_LOGIC: {
         /* (res, shco, oldV) */
         UInt oldV = cc_dep3;
         vassert((oldV & ~1) == 0);
         UInt vf   = oldV;
         return vf;
      }
      case ARMG_CC_OP_MUL: {
         /* (res, unused, oldC:oldV) */
         UInt oldV = (cc_dep3 >> 0) & 1;
         vassert((cc_dep3 & ~3) == 0);
         UInt vf   = oldV;
         return vf;
      }
      case ARMG_CC_OP_MULL: {
         /* (resLo32, resHi32, oldC:oldV) */
         UInt oldV    = (cc_dep3 >> 0) & 1;
         vassert((cc_dep3 & ~3) == 0);
         UInt vf      = oldV;
         return vf;
      }
      default:
         /* shouldn't really make these calls from generated code */
         vex_printf("armg_calculate_flag_v"
                    "( op=%u, dep1=0x%x, dep2=0x%x, dep3=0x%x )\n",
                    cc_op, cc_dep1, cc_dep2, cc_dep3 );
         vpanic("armg_calculate_flag_v");
   }
}


/* CALLED FROM GENERATED CODE: CLEAN HELPER */
/* Calculate NZCV from the supplied thunk components, in the positions
   they appear in the CPSR, viz bits 31:28 for N Z C V respectively.
   Returned bits 27:0 are zero. */
UInt armg_calculate_flags_nzcv ( UInt cc_op, UInt cc_dep1,
                                 UInt cc_dep2, UInt cc_dep3 )
{
   UInt f;
   UInt res = 0;
   f = armg_calculate_flag_n(cc_op, cc_dep1, cc_dep2, cc_dep3);
   res |= (f << ARMG_CC_SHIFT_N);
   f = armg_calculate_flag_z(cc_op, cc_dep1, cc_dep2, cc_dep3);
   res |= (f << ARMG_CC_SHIFT_Z);
   f = armg_calculate_flag_c(cc_op, cc_dep1, cc_dep2, cc_dep3);
   res |= (f << ARMG_CC_SHIFT_C);
   f = armg_calculate_flag_v(cc_op, cc_dep1, cc_dep2, cc_dep3);
   res |= (f << ARMG_CC_SHIFT_V);
   return res;
}
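

/* A small sanity sketch (example only, hence kept under #if 0):
   adding 1 to 0xFFFFFFFF wraps to zero, so Z and C must be set while
   N and V stay clear. */
#if 0
static void example_check_add_flags ( void )
{
   UInt nzcv = armg_calculate_flags_nzcv(ARMG_CC_OP_ADD,
                                         0xFFFFFFFF, 1, 0);
   vassert(nzcv == (ARMG_CC_MASK_Z | ARMG_CC_MASK_C));
}
#endif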


/* CALLED FROM GENERATED CODE: CLEAN HELPER */
/* Calculate the QC flag from the arguments, in the lowest bit
   of the word (bit 0).  Urr, having this out of line is bizarre.
   Push back inline. */
UInt armg_calculate_flag_qc ( UInt resL1, UInt resL2,
                              UInt resR1, UInt resR2 )
{
   if (resL1 != resR1 || resL2 != resR2)
      return 1;
   else
      return 0;
}

/* CALLED FROM GENERATED CODE: CLEAN HELPER */
/* Calculate the specified condition from the thunk components, in the
   lowest bit of the word (bit 0).  Returned bits 31:1 are zero. */
UInt armg_calculate_condition ( UInt cond_n_op /* (ARMCondcode << 4) | cc_op */,
                                UInt cc_dep1,
                                UInt cc_dep2, UInt cc_dep3 )
{
   UInt cond  = cond_n_op >> 4;
   UInt cc_op = cond_n_op & 0xF;
   UInt nf, zf, vf, cf, inv;
   //   vex_printf("XXXXXXXX %x %x %x %x\n",
   //              cond_n_op, cc_dep1, cc_dep2, cc_dep3);

   // skip flags computation in this case
   if (cond == ARMCondAL) return 1;

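   /* ARM condition codes come in complementary pairs (EQ/NE, HS/LO,
      MI/PL, ...), with bit 0 of the code selecting the inverted
      sense.  Each pair below therefore computes the base predicate
      once and XORs in "inv"; e.g. for NE (EQ with bit 0 set) this
      yields inv ^ zf == !zf. */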
   inv  = cond & 1;

   switch (cond) {
      case ARMCondEQ:    // Z=1         => z
      case ARMCondNE:    // Z=0
         zf = armg_calculate_flag_z(cc_op, cc_dep1, cc_dep2, cc_dep3);
         return inv ^ zf;

      case ARMCondHS:    // C=1         => c
      case ARMCondLO:    // C=0
         cf = armg_calculate_flag_c(cc_op, cc_dep1, cc_dep2, cc_dep3);
         return inv ^ cf;

      case ARMCondMI:    // N=1         => n
      case ARMCondPL:    // N=0
         nf = armg_calculate_flag_n(cc_op, cc_dep1, cc_dep2, cc_dep3);
         return inv ^ nf;

      case ARMCondVS:    // V=1         => v
      case ARMCondVC:    // V=0
         vf = armg_calculate_flag_v(cc_op, cc_dep1, cc_dep2, cc_dep3);
         return inv ^ vf;

      case ARMCondHI:    // C=1 && Z=0   => c & ~z
      case ARMCondLS:    // C=0 || Z=1
         cf = armg_calculate_flag_c(cc_op, cc_dep1, cc_dep2, cc_dep3);
         zf = armg_calculate_flag_z(cc_op, cc_dep1, cc_dep2, cc_dep3);
         return inv ^ (1 & (cf & ~zf));

      case ARMCondGE:    // N=V          => ~(n^v)
      case ARMCondLT:    // N!=V
         nf = armg_calculate_flag_n(cc_op, cc_dep1, cc_dep2, cc_dep3);
         vf = armg_calculate_flag_v(cc_op, cc_dep1, cc_dep2, cc_dep3);
         return inv ^ (1 & ~(nf ^ vf));

      case ARMCondGT:    // Z=0 && N=V   => ~z & ~(n^v)  =>  ~(z | (n^v))
      case ARMCondLE:    // Z=1 || N!=V
         nf = armg_calculate_flag_n(cc_op, cc_dep1, cc_dep2, cc_dep3);
         vf = armg_calculate_flag_v(cc_op, cc_dep1, cc_dep2, cc_dep3);
         zf = armg_calculate_flag_z(cc_op, cc_dep1, cc_dep2, cc_dep3);
         return inv ^ (1 & ~(zf | (nf ^ vf)));

      case ARMCondAL: // handled above
      case ARMCondNV: // should never get here: Illegal instr
      default:
         /* shouldn't really make these calls from generated code */
         vex_printf("armg_calculate_condition(ARM)"
                    "( %u, %u, 0x%x, 0x%x, 0x%x )\n",
                    cond, cc_op, cc_dep1, cc_dep2, cc_dep3 );
         vpanic("armg_calculate_condition(ARM)");
   }
}


/*---------------------------------------------------------------*/
/*--- Flag-helpers translation-time function specialisers.    ---*/
/*--- These help iropt specialise calls to the above run-time ---*/
/*--- flags functions.                                        ---*/
/*---------------------------------------------------------------*/

/* Used by the optimiser to try specialisations.  Returns an
   equivalent expression, or NULL if none. */

static Bool isU32 ( IRExpr* e, UInt n )
{
   return
      toBool( e->tag == Iex_Const
              && e->Iex.Const.con->tag == Ico_U32
              && e->Iex.Const.con->Ico.U32 == n );
}

IRExpr* guest_arm_spechelper ( const HChar* function_name,
                               IRExpr** args,
                               IRStmt** precedingStmts,
                               Int      n_precedingStmts )
{
#  define unop(_op,_a1) IRExpr_Unop((_op),(_a1))
#  define binop(_op,_a1,_a2) IRExpr_Binop((_op),(_a1),(_a2))
#  define mkU32(_n) IRExpr_Const(IRConst_U32(_n))
#  define mkU8(_n)  IRExpr_Const(IRConst_U8(_n))

   Int i, arity = 0;
   for (i = 0; args[i]; i++)
      arity++;
#  if 0
   vex_printf("spec request:\n");
   vex_printf("   %s  ", function_name);
   for (i = 0; i < arity; i++) {
      vex_printf("  ");
      ppIRExpr(args[i]);
   }
   vex_printf("\n");
#  endif

   /* --------- specialising "armg_calculate_condition" --------- */

   if (vex_streq(function_name, "armg_calculate_condition")) {

      /* specialise calls to the "armg_calculate_condition" function.
         Not sure whether this is strictly necessary, but: the
         replacement IR must produce only the values 0 or 1.  Bits
         31:1 are required to be zero. */
      IRExpr *cond_n_op, *cc_dep1, *cc_dep2, *cc_ndep;
      vassert(arity == 4);
      cond_n_op = args[0]; /* (ARMCondcode << 4)  |  ARMG_CC_OP_* */
      cc_dep1   = args[1];
      cc_dep2   = args[2];
      cc_ndep   = args[3];
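
      /* As a concrete example of what a specialisation buys us: a
         call armg_calculate_condition((ARMCondEQ << 4) |
         ARMG_CC_OP_SUB, argL, argR, ndep) is rewritten below to
         1Uto32(CmpEQ32(argL, argR)), which iropt can then fold and
         propagate, instead of leaving an opaque helper call. */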

      /*---------------- SUB ----------------*/

      if (isU32(cond_n_op, (ARMCondEQ << 4) | ARMG_CC_OP_SUB)) {
         /* EQ after SUB --> test argL == argR */
         return unop(Iop_1Uto32,
                     binop(Iop_CmpEQ32, cc_dep1, cc_dep2));
      }
      if (isU32(cond_n_op, (ARMCondNE << 4) | ARMG_CC_OP_SUB)) {
         /* NE after SUB --> test argL != argR */
         return unop(Iop_1Uto32,
                     binop(Iop_CmpNE32, cc_dep1, cc_dep2));
      }

      if (isU32(cond_n_op, (ARMCondGT << 4) | ARMG_CC_OP_SUB)) {
         /* GT after SUB --> test argL >s argR
                         --> test argR <s argL */
         return unop(Iop_1Uto32,
                     binop(Iop_CmpLT32S, cc_dep2, cc_dep1));
      }
      if (isU32(cond_n_op, (ARMCondLE << 4) | ARMG_CC_OP_SUB)) {
         /* LE after SUB --> test argL <=s argR */
         return unop(Iop_1Uto32,
                     binop(Iop_CmpLE32S, cc_dep1, cc_dep2));
      }

      if (isU32(cond_n_op, (ARMCondLT << 4) | ARMG_CC_OP_SUB)) {
         /* LT after SUB --> test argL <s argR */
         return unop(Iop_1Uto32,
                     binop(Iop_CmpLT32S, cc_dep1, cc_dep2));
      }

      if (isU32(cond_n_op, (ARMCondGE << 4) | ARMG_CC_OP_SUB)) {
         /* GE after SUB --> test argL >=s argR
                         --> test argR <=s argL */
         return unop(Iop_1Uto32,
                     binop(Iop_CmpLE32S, cc_dep2, cc_dep1));
      }

      if (isU32(cond_n_op, (ARMCondHS << 4) | ARMG_CC_OP_SUB)) {
         /* HS after SUB --> test argL >=u argR
                         --> test argR <=u argL */
         return unop(Iop_1Uto32,
                     binop(Iop_CmpLE32U, cc_dep2, cc_dep1));
      }
      if (isU32(cond_n_op, (ARMCondLO << 4) | ARMG_CC_OP_SUB)) {
         /* LO after SUB --> test argL <u argR */
         return unop(Iop_1Uto32,
                     binop(Iop_CmpLT32U, cc_dep1, cc_dep2));
      }

      if (isU32(cond_n_op, (ARMCondLS << 4) | ARMG_CC_OP_SUB)) {
         /* LS after SUB --> test argL <=u argR */
         return unop(Iop_1Uto32,
                     binop(Iop_CmpLE32U, cc_dep1, cc_dep2));
      }
      if (isU32(cond_n_op, (ARMCondHI << 4) | ARMG_CC_OP_SUB)) {
         /* HI after SUB --> test argL >u argR
                         --> test argR <u argL */
         return unop(Iop_1Uto32,
                     binop(Iop_CmpLT32U, cc_dep2, cc_dep1));
      }

      /*---------------- SBB ----------------*/

      if (isU32(cond_n_op, (ARMCondHS << 4) | ARMG_CC_OP_SBB)) {
         /* This seems to happen a lot in softfloat code, eg __divdf3+140 */
         /* thunk is: (dep1=argL, dep2=argR, ndep=oldC) */
         /* HS after SBB (same as C after SBB below)
            --> oldC ? (argL >=u argR) : (argL >u argR)
            --> oldC ? (argR <=u argL) : (argR <u argL)
         */
         return
            IRExpr_ITE(
               binop(Iop_CmpNE32, cc_ndep, mkU32(0)),
               /* case oldC != 0 */
               unop(Iop_1Uto32, binop(Iop_CmpLE32U, cc_dep2, cc_dep1)),
               /* case oldC == 0 */
               unop(Iop_1Uto32, binop(Iop_CmpLT32U, cc_dep2, cc_dep1))
            );
      }

      /*---------------- LOGIC ----------------*/

      if (isU32(cond_n_op, (ARMCondEQ << 4) | ARMG_CC_OP_LOGIC)) {
         /* EQ after LOGIC --> test res == 0 */
         return unop(Iop_1Uto32,
                     binop(Iop_CmpEQ32, cc_dep1, mkU32(0)));
      }
      if (isU32(cond_n_op, (ARMCondNE << 4) | ARMG_CC_OP_LOGIC)) {
         /* NE after LOGIC --> test res != 0 */
         return unop(Iop_1Uto32,
                     binop(Iop_CmpNE32, cc_dep1, mkU32(0)));
      }

      if (isU32(cond_n_op, (ARMCondPL << 4) | ARMG_CC_OP_LOGIC)) {
         /* PL after LOGIC --> test (res >> 31) == 0 */
         return unop(Iop_1Uto32,
                     binop(Iop_CmpEQ32,
                           binop(Iop_Shr32, cc_dep1, mkU8(31)),
                           mkU32(0)));
      }
      if (isU32(cond_n_op, (ARMCondMI << 4) | ARMG_CC_OP_LOGIC)) {
         /* MI after LOGIC --> test (res >> 31) == 1 */
         return unop(Iop_1Uto32,
                     binop(Iop_CmpEQ32,
                           binop(Iop_Shr32, cc_dep1, mkU8(31)),
                           mkU32(1)));
      }

      /*---------------- COPY ----------------*/

      /* --- 0,1 --- */
      if (isU32(cond_n_op, (ARMCondEQ << 4) | ARMG_CC_OP_COPY)) {
         /* EQ after COPY --> (cc_dep1 >> ARMG_CC_SHIFT_Z) & 1 */
         return binop(Iop_And32,
                      binop(Iop_Shr32, cc_dep1,
                            mkU8(ARMG_CC_SHIFT_Z)),
                      mkU32(1));
      }
      if (isU32(cond_n_op, (ARMCondNE << 4) | ARMG_CC_OP_COPY)) {
         /* NE after COPY --> ((cc_dep1 >> ARMG_CC_SHIFT_Z) ^ 1) & 1 */
         return binop(Iop_And32,
                      binop(Iop_Xor32,
                            binop(Iop_Shr32, cc_dep1,
                                             mkU8(ARMG_CC_SHIFT_Z)),
                            mkU32(1)),
                      mkU32(1));
      }

      /* --- 4,5 --- */
      if (isU32(cond_n_op, (ARMCondMI << 4) | ARMG_CC_OP_COPY)) {
         /* MI after COPY --> (cc_dep1 >> ARMG_CC_SHIFT_N) & 1 */
         return binop(Iop_And32,
                      binop(Iop_Shr32, cc_dep1,
                            mkU8(ARMG_CC_SHIFT_N)),
                      mkU32(1));
      }
      if (isU32(cond_n_op, (ARMCondPL << 4) | ARMG_CC_OP_COPY)) {
         /* PL after COPY --> ((cc_dep1 >> ARMG_CC_SHIFT_N) ^ 1) & 1 */
         return binop(Iop_And32,
                      binop(Iop_Xor32,
                            binop(Iop_Shr32, cc_dep1,
                                             mkU8(ARMG_CC_SHIFT_N)),
                            mkU32(1)),
                      mkU32(1));
      }

      /* --- 12,13 --- */
      if (isU32(cond_n_op, (ARMCondGT << 4) | ARMG_CC_OP_COPY)) {
         /* GT after COPY --> ((z | (n^v)) & 1) ^ 1 */
         IRExpr* n = binop(Iop_Shr32, cc_dep1, mkU8(ARMG_CC_SHIFT_N));
         IRExpr* v = binop(Iop_Shr32, cc_dep1, mkU8(ARMG_CC_SHIFT_V));
         IRExpr* z = binop(Iop_Shr32, cc_dep1, mkU8(ARMG_CC_SHIFT_Z));
         return binop(Iop_Xor32,
                      binop(Iop_And32,
                            binop(Iop_Or32, z, binop(Iop_Xor32, n, v)),
                            mkU32(1)),
                      mkU32(1));
      }
      if (isU32(cond_n_op, (ARMCondLE << 4) | ARMG_CC_OP_COPY)) {
         /* LE after COPY --> ((z | (n^v)) & 1) ^ 0 */
         IRExpr* n = binop(Iop_Shr32, cc_dep1, mkU8(ARMG_CC_SHIFT_N));
         IRExpr* v = binop(Iop_Shr32, cc_dep1, mkU8(ARMG_CC_SHIFT_V));
         IRExpr* z = binop(Iop_Shr32, cc_dep1, mkU8(ARMG_CC_SHIFT_Z));
         return binop(Iop_Xor32,
                      binop(Iop_And32,
                            binop(Iop_Or32, z, binop(Iop_Xor32, n, v)),
                            mkU32(1)),
                      mkU32(0));
      }

      /*----------------- AL -----------------*/

      /* A critically important case for Thumb code.

         What we're trying to spot is the case where cond_n_op is an
         expression of the form Or32(..., 0xE0) since that means the
         caller is asking for CondAL and we can simply return 1
         without caring what the ... part is.  This is a potentially
         dodgy kludge in that it assumes that the ... part has zeroes
         in bits 7:4, so that the result of the Or32 is guaranteed to
         be 0xE in bits 7:4.  Given that the places where this first
         arg is constructed (in guest_arm_toIR.c) are very
         constrained, we can get away with this.  To make this
         guaranteed safe would require a new primop, Slice44
         or some such, thusly

         Slice44(arg1, arg2) = 0--(24)--0 arg1[7:4] arg2[3:0]

         and we would then look for Slice44(0xE0, ...)
         which would give the required safety property.

         It would be infeasibly expensive to scan backwards through
         the entire block looking for an assignment to the temp, so
         just look at the previous 16 statements.  That should find it
         if it is an interesting case, as a result of how the
         boilerplate guff at the start of each Thumb insn translation
         is made.
      */
      if (cond_n_op->tag == Iex_RdTmp) {
         Int    j;
         IRTemp look_for = cond_n_op->Iex.RdTmp.tmp;
         Int    limit    = n_precedingStmts - 16;
         if (limit < 0) limit = 0;
         if (0) vex_printf("scanning %d .. %d\n", n_precedingStmts-1, limit);
         for (j = n_precedingStmts - 1; j >= limit; j--) {
            IRStmt* st = precedingStmts[j];
            if (st->tag == Ist_WrTmp
                && st->Ist.WrTmp.tmp == look_for
                && st->Ist.WrTmp.data->tag == Iex_Binop
                && st->Ist.WrTmp.data->Iex.Binop.op == Iop_Or32
                && isU32(st->Ist.WrTmp.data->Iex.Binop.arg2, (ARMCondAL << 4)))
               return mkU32(1);
         }
         /* Didn't find any useful binding to the first arg
            in the previous 16 stmts. */
      }
   }

   /* --------- specialising "armg_calculate_flag_c" --------- */

   else
   if (vex_streq(function_name, "armg_calculate_flag_c")) {

      /* specialise calls to the "armg_calculate_flag_c" function.
         Note that the returned value must be either 0 or 1; nonzero
         bits 31:1 are not allowed.  In turn, incoming oldV and oldC
         values (from the thunk) are assumed to have bits 31:1
         clear. */
      IRExpr *cc_op, *cc_dep1, *cc_dep2, *cc_ndep;
      vassert(arity == 4);
      cc_op   = args[0]; /* ARMG_CC_OP_* */
      cc_dep1 = args[1];
      cc_dep2 = args[2];
      cc_ndep = args[3];

      if (isU32(cc_op, ARMG_CC_OP_LOGIC)) {
         /* Thunk args are (result, shco, oldV) */
         /* C after LOGIC --> shco */
         return cc_dep2;
      }

      if (isU32(cc_op, ARMG_CC_OP_SUB)) {
         /* Thunk args are (argL, argR, unused) */
         /* C after SUB --> argL >=u argR
                        --> argR <=u argL */
         return unop(Iop_1Uto32,
                     binop(Iop_CmpLE32U, cc_dep2, cc_dep1));
      }

      if (isU32(cc_op, ARMG_CC_OP_SBB)) {
         /* This happens occasionally in softfloat code, eg __divdf3+140 */
         /* thunk is: (dep1=argL, dep2=argR, ndep=oldC) */
         /* C after SBB (same as HS after SBB above)
            --> oldC ? (argL >=u argR) : (argL >u argR)
            --> oldC ? (argR <=u argL) : (argR <u argL)
         */
         return
            IRExpr_ITE(
               binop(Iop_CmpNE32, cc_ndep, mkU32(0)),
               /* case oldC != 0 */
               unop(Iop_1Uto32, binop(Iop_CmpLE32U, cc_dep2, cc_dep1)),
               /* case oldC == 0 */
               unop(Iop_1Uto32, binop(Iop_CmpLT32U, cc_dep2, cc_dep1))
            );
      }

   }

   /* --------- specialising "armg_calculate_flag_v" --------- */

   else
   if (vex_streq(function_name, "armg_calculate_flag_v")) {

      /* specialise calls to the "armg_calculate_flag_v" function.
         Note that the returned value must be either 0 or 1; nonzero
         bits 31:1 are not allowed.  In turn, incoming oldV and oldC
         values (from the thunk) are assumed to have bits 31:1
         clear. */
      IRExpr *cc_op, *cc_dep1, *cc_dep2, *cc_ndep;
      vassert(arity == 4);
      cc_op   = args[0]; /* ARMG_CC_OP_* */
      cc_dep1 = args[1];
      cc_dep2 = args[2];
      cc_ndep = args[3];

      if (isU32(cc_op, ARMG_CC_OP_LOGIC)) {
         /* Thunk args are (result, shco, oldV) */
         /* V after LOGIC --> oldV */
         return cc_ndep;
      }

      if (isU32(cc_op, ARMG_CC_OP_SUB)) {
         /* Thunk args are (argL, argR, unused) */
         /* V after SUB
            --> let res = argL - argR
                in ((argL ^ argR) & (argL ^ res)) >> 31
            --> ((argL ^ argR) & (argL ^ (argL - argR))) >> 31
         */
         IRExpr* argL = cc_dep1;
         IRExpr* argR = cc_dep2;
         return
            binop(Iop_Shr32,
                  binop(Iop_And32,
                        binop(Iop_Xor32, argL, argR),
                        binop(Iop_Xor32, argL, binop(Iop_Sub32, argL, argR))
                  ),
                  mkU8(31)
            );
      }

      if (isU32(cc_op, ARMG_CC_OP_SBB)) {
         /* This happens occasionally in softfloat code, eg __divdf3+140 */
         /* thunk is: (dep1=argL, dep2=argR, ndep=oldC) */
         /* V after SBB
            --> let res = argL - argR - (oldC ^ 1)
                in  ((argL ^ argR) & (argL ^ res)) >> 31
            (the shift, rather than an "& 1", is needed to match the
            run-time helper above, which takes the sign bit) */
         return
            binop(
               Iop_Shr32,
               binop(
                  Iop_And32,
                  // argL ^ argR
                  binop(Iop_Xor32, cc_dep1, cc_dep2),
                  // argL ^ (argL - argR - (oldC ^ 1))
                  binop(Iop_Xor32,
                        cc_dep1,
                        binop(Iop_Sub32,
                              binop(Iop_Sub32, cc_dep1, cc_dep2),
                              binop(Iop_Xor32, cc_ndep, mkU32(1)))
                  )
               ),
               mkU8(31)
            );
      }

   }

#  undef unop
#  undef binop
#  undef mkU32
#  undef mkU8

   return NULL;
}


/*----------------------------------------------*/
/*--- The exported fns ..                    ---*/
/*----------------------------------------------*/

/* VISIBLE TO LIBVEX CLIENT */
#if 0
void LibVEX_GuestARM_put_flags ( UInt flags_native,
                                 /*OUT*/VexGuestARMState* vex_state )
{
   vassert(0); // FIXME

   /* Mask out everything except N Z V C. */
   flags_native
      &= (ARMG_CC_MASK_N | ARMG_CC_MASK_Z | ARMG_CC_MASK_V | ARMG_CC_MASK_C);

   vex_state->guest_CC_OP   = ARMG_CC_OP_COPY;
   vex_state->guest_CC_DEP1 = flags_native;
   vex_state->guest_CC_DEP2 = 0;
   vex_state->guest_CC_NDEP = 0;
}
#endif

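/* CPSR bit layout relied on below (architecturally defined): N=31,
   Z=30, C=29, V=28, Q=27, IT[1:0]=26:25, J=24, GE=19:16,
   IT[7:2]=15:10, E=9, A=8, I=7, F=6, T=5, M=4:0. */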
/* VISIBLE TO LIBVEX CLIENT */
UInt LibVEX_GuestARM_get_cpsr ( /*IN*/const VexGuestARMState* vex_state )
{
   UInt cpsr = 0;
   // NZCV
   cpsr |= armg_calculate_flags_nzcv(
               vex_state->guest_CC_OP,
               vex_state->guest_CC_DEP1,
               vex_state->guest_CC_DEP2,
               vex_state->guest_CC_NDEP
            );
   vassert(0 == (cpsr & 0x0FFFFFFF));
   // Q
   if (vex_state->guest_QFLAG32 > 0)
      cpsr |= (1 << 27);
   // GE
   if (vex_state->guest_GEFLAG0 > 0)
      cpsr |= (1 << 16);
   if (vex_state->guest_GEFLAG1 > 0)
      cpsr |= (1 << 17);
   if (vex_state->guest_GEFLAG2 > 0)
      cpsr |= (1 << 18);
   if (vex_state->guest_GEFLAG3 > 0)
      cpsr |= (1 << 19);
   // M
   cpsr |= (1 << 4); // 0b10000 means user-mode
   // J,T   J (bit 24) is zero by initialisation above
   // T  we copy from R15T[0]
   if (vex_state->guest_R15T & 1)
      cpsr |= (1 << 5);
   // ITSTATE we punt on for the time being.  Could compute it
   // if needed though.
   // E, endianness, 0 (littleendian) from initialisation above
   // A,I,F disable some async exceptions.  Not sure about these.
   // Leave as zero for the time being.
   return cpsr;
}

/* VISIBLE TO LIBVEX CLIENT */
void LibVEX_GuestARM_initialise ( /*OUT*/VexGuestARMState* vex_state )
{
   vex_state->host_EvC_FAILADDR = 0;
   vex_state->host_EvC_COUNTER = 0;

   vex_state->guest_R0  = 0;
   vex_state->guest_R1  = 0;
   vex_state->guest_R2  = 0;
   vex_state->guest_R3  = 0;
   vex_state->guest_R4  = 0;
   vex_state->guest_R5  = 0;
   vex_state->guest_R6  = 0;
   vex_state->guest_R7  = 0;
   vex_state->guest_R8  = 0;
   vex_state->guest_R9  = 0;
   vex_state->guest_R10 = 0;
   vex_state->guest_R11 = 0;
   vex_state->guest_R12 = 0;
   vex_state->guest_R13 = 0;
   vex_state->guest_R14 = 0;
   vex_state->guest_R15T = 0;  /* NB: implies ARM mode */

   vex_state->guest_CC_OP   = ARMG_CC_OP_COPY;
   vex_state->guest_CC_DEP1 = 0;
   vex_state->guest_CC_DEP2 = 0;
   vex_state->guest_CC_NDEP = 0;
   vex_state->guest_QFLAG32 = 0;
   vex_state->guest_GEFLAG0 = 0;
   vex_state->guest_GEFLAG1 = 0;
   vex_state->guest_GEFLAG2 = 0;
   vex_state->guest_GEFLAG3 = 0;

   vex_state->guest_EMNOTE  = EmNote_NONE;
   vex_state->guest_CMSTART = 0;
   vex_state->guest_CMLEN   = 0;
   vex_state->guest_NRADDR  = 0;
   vex_state->guest_IP_AT_SYSCALL = 0;

   vex_state->guest_D0  = 0;
   vex_state->guest_D1  = 0;
   vex_state->guest_D2  = 0;
   vex_state->guest_D3  = 0;
   vex_state->guest_D4  = 0;
   vex_state->guest_D5  = 0;
   vex_state->guest_D6  = 0;
   vex_state->guest_D7  = 0;
   vex_state->guest_D8  = 0;
   vex_state->guest_D9  = 0;
   vex_state->guest_D10 = 0;
   vex_state->guest_D11 = 0;
   vex_state->guest_D12 = 0;
   vex_state->guest_D13 = 0;
   vex_state->guest_D14 = 0;
   vex_state->guest_D15 = 0;
   vex_state->guest_D16 = 0;
   vex_state->guest_D17 = 0;
   vex_state->guest_D18 = 0;
   vex_state->guest_D19 = 0;
   vex_state->guest_D20 = 0;
   vex_state->guest_D21 = 0;
   vex_state->guest_D22 = 0;
   vex_state->guest_D23 = 0;
   vex_state->guest_D24 = 0;
   vex_state->guest_D25 = 0;
   vex_state->guest_D26 = 0;
   vex_state->guest_D27 = 0;
   vex_state->guest_D28 = 0;
   vex_state->guest_D29 = 0;
   vex_state->guest_D30 = 0;
   vex_state->guest_D31 = 0;

   /* ARM encoded; zero is the default as it happens (result flags
      (NZCV) cleared, FZ disabled, round to nearest, non-vector mode,
      all exns masked, all exn sticky bits cleared). */
   vex_state->guest_FPSCR = 0;

   vex_state->guest_TPIDRURO = 0;

   /* Not in a Thumb IT block. */
   vex_state->guest_ITSTATE = 0;

   vex_state->padding1 = 0;
}
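

/* Usage sketch (example only, hence kept under #if 0): a freshly
   initialised state has CC_OP=ARMG_CC_OP_COPY with CC_DEP1=0 and
   R15T=0, so LibVEX_GuestARM_get_cpsr should report only the
   user-mode M bits, i.e. 0x10. */
#if 0
static void example_cpsr_of_fresh_state ( void )
{
   VexGuestARMState st;
   LibVEX_GuestARM_initialise(&st);
   vassert(LibVEX_GuestARM_get_cpsr(&st) == (1 << 4));
}
#endif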


/*-----------------------------------------------------------*/
/*--- Describing the arm guest state, for the benefit     ---*/
/*--- of iropt and instrumenters.                         ---*/
/*-----------------------------------------------------------*/

/* Figure out if any part of the guest state contained in minoff
   .. maxoff requires precise memory exceptions.  If in doubt return
   True (but this generates significantly slower code).

   We enforce precise exns for guest R13(sp), R15T(pc), R7, R11.

   Only R13(sp) is needed in mode VexRegUpdSpAtMemAccess.
*/
Bool guest_arm_state_requires_precise_mem_exns (
        Int minoff, Int maxoff, VexRegisterUpdates pxControl
     )
{
   Int sp_min = offsetof(VexGuestARMState, guest_R13);
   Int sp_max = sp_min + 4 - 1;
   Int pc_min = offsetof(VexGuestARMState, guest_R15T);
   Int pc_max = pc_min + 4 - 1;

   if (maxoff < sp_min || minoff > sp_max) {
      /* no overlap with sp */
      if (pxControl == VexRegUpdSpAtMemAccess)
         return False; // We only need to check stack pointer.
   } else {
      return True;
   }

   if (maxoff < pc_min || minoff > pc_max) {
      /* no overlap with pc */
   } else {
      return True;
   }

   /* We appear to need precise updates of R11 in order to get proper
      stacktraces from non-optimised code. */
   Int r11_min = offsetof(VexGuestARMState, guest_R11);
   Int r11_max = r11_min + 4 - 1;

   if (maxoff < r11_min || minoff > r11_max) {
      /* no overlap with r11 */
   } else {
      return True;
   }

   /* Ditto R7, particularly needed for proper stacktraces in Thumb
      code. */
   Int r7_min = offsetof(VexGuestARMState, guest_R7);
   Int r7_max = r7_min + 4 - 1;

   if (maxoff < r7_min || minoff > r7_max) {
      /* no overlap with r7 */
   } else {
      return True;
   }

   return False;
}


#define ALWAYSDEFD(field)                           \
    { offsetof(VexGuestARMState, field),            \
      (sizeof ((VexGuestARMState*)0)->field) }

VexGuestLayout
   armGuest_layout
      = {
          /* Total size of the guest state, in bytes. */
          .total_sizeB = sizeof(VexGuestARMState),

          /* Describe the stack pointer. */
          .offset_SP = offsetof(VexGuestARMState,guest_R13),
          .sizeof_SP = 4,

          /* Describe the instruction pointer. */
          .offset_IP = offsetof(VexGuestARMState,guest_R15T),
          .sizeof_IP = 4,

          /* Describe any sections to be regarded by Memcheck as
             'always-defined'. */
          .n_alwaysDefd = 10,

          /* flags thunk: OP is always defd, whereas DEP1 and DEP2
             have to be tracked.  See detailed comment in gdefs.h on
             meaning of thunk fields. */
          .alwaysDefd
             = { /* 0 */ ALWAYSDEFD(guest_R15T),
                 /* 1 */ ALWAYSDEFD(guest_CC_OP),
                 /* 2 */ ALWAYSDEFD(guest_CC_NDEP),
                 /* 3 */ ALWAYSDEFD(guest_EMNOTE),
                 /* 4 */ ALWAYSDEFD(guest_CMSTART),
                 /* 5 */ ALWAYSDEFD(guest_CMLEN),
                 /* 6 */ ALWAYSDEFD(guest_NRADDR),
                 /* 7 */ ALWAYSDEFD(guest_IP_AT_SYSCALL),
                 /* 8 */ ALWAYSDEFD(guest_TPIDRURO),
                 /* 9 */ ALWAYSDEFD(guest_ITSTATE)
               }
        };


/*---------------------------------------------------------------*/
/*--- end                                 guest_arm_helpers.c ---*/
/*---------------------------------------------------------------*/