
/*--------------------------------------------------------------------*/
/*--- Ptrcheck: a pointer-use checker.                             ---*/
/*--- This file checks heap accesses.                              ---*/
/*---                                                     h_main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Ptrcheck, a Valgrind tool for checking pointer
   use in programs.

   Initial version (Annelid):

   Copyright (C) 2003-2012 Nicholas Nethercote
      njn@valgrind.org

   Valgrind-3.X port:

   Copyright (C) 2008-2012 OpenWorks Ltd
      info@open-works.co.uk

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "pub_tool_basics.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_mallocfree.h"
#include "pub_tool_execontext.h"
#include "pub_tool_hashtable.h"
#include "pub_tool_tooliface.h"
#include "pub_tool_replacemalloc.h"
#include "pub_tool_options.h"
#include "pub_tool_aspacemgr.h"    // VG_(am_shadow_malloc)
#include "pub_tool_vki.h"          // VKI_MAX_PAGE_SIZE
#include "pub_tool_machine.h"      // VG_({get,set}_shadow_regs_area) et al
#include "pub_tool_debuginfo.h"    // VG_(get_fnname)
#include "pub_tool_threadstate.h"  // VG_(get_running_tid)
#include "pub_tool_oset.h"
#include "pub_tool_vkiscnums.h"
#include "pub_tool_wordfm.h"
#include "pub_tool_xarray.h"

#include "pc_common.h"

//#include "h_list.h"
#include "h_main.h"

#include "sg_main.h"   // sg_instrument_*, and struct _SGEnv



/*------------------------------------------------------------*/
/*--- Stats                                                ---*/
/*------------------------------------------------------------*/

static ULong stats__client_mallocs = 0;
static ULong stats__client_frees   = 0;
static ULong stats__segs_allocd    = 0;
static ULong stats__segs_recycled  = 0;


//////////////////////////////////////////////////////////////
//                                                          //
// Segments low-level storage                               //
//                                                          //
//////////////////////////////////////////////////////////////

// NONPTR, UNKNOWN, BOTTOM are defined in h_main.h, since
// pc_common.c needs to see them for error processing.

// We only start recycling Segs once this many freed Segs exist.
#define N_FREED_SEGS (1 * 1000 * 1000)

struct _Seg {
   Addr  addr;
   SizeT szB; /* may be zero */
   ExeContext* ec;  /* where malloc'd or freed */
   /* When set to (Seg*)1, indicates the block is in use.  Otherwise,
      used to form a linked list of freed blocks, running from the
      oldest freed block to the most recently freed block. */
   struct _Seg* nextfree;
};
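
/* Illustrative summary (editor's note, not part of the tool): the
   'nextfree' field encodes three states:

      live Seg:             nextfree == (Seg*)1
      freed, not youngest:  nextfree == next-younger freed Seg
      freed, youngest:      nextfree == NULL
*/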

// Determines if 'a' is before, within, or after seg's range.  Sets 'cmp' to
// -1/0/1 accordingly.  Sets 'n' to the number of bytes before/within/after.
void Seg__cmp(Seg* seg, Addr a, Int* cmp, UWord* n)
{
   if (a < seg->addr) {
      *cmp = -1;
      *n   = seg->addr - a;
   } else if (a < seg->addr + seg->szB && seg->szB > 0) {
      *cmp = 0;
      *n = a - seg->addr;
   } else {
      *cmp = 1;
      *n = a - (seg->addr + seg->szB);
   }
}
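
/* Worked example (illustrative only): for a Seg with addr == 0x1000
   and szB == 8,

      Seg__cmp(seg, 0x0FFE, &cmp, &n);  // cmp == -1, n == 2 (before)
      Seg__cmp(seg, 0x1003, &cmp, &n);  // cmp ==  0, n == 3 (within)
      Seg__cmp(seg, 0x100A, &cmp, &n);  // cmp ==  1, n == 2 (after)

   Note that a zero-sized Seg can never compare as "within". */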

/*inline*/ Bool Seg__is_freed(Seg* seg)
{
   if (!is_known_segment(seg))
      return False;
   else
      return seg->nextfree != (Seg*)1;
}

ExeContext* Seg__where(Seg* seg)
{
   tl_assert(is_known_segment(seg));
   return seg->ec;
}

SizeT Seg__size(Seg* seg)
{
   tl_assert(is_known_segment(seg));
   return seg->szB;
}

Addr Seg__addr(Seg* seg)
{
   tl_assert(is_known_segment(seg));
   return seg->addr;
}


#define N_SEGS_PER_GROUP 10000

typedef
   struct _SegGroup {
      struct _SegGroup* admin;
      UWord nextfree; /* 0 .. N_SEGS_PER_GROUP */
      Seg segs[N_SEGS_PER_GROUP];
   }
   SegGroup;

static SegGroup* group_list = NULL;
static UWord     nFreeSegs = 0;
static Seg*      freesegs_youngest = NULL;
static Seg*      freesegs_oldest = NULL;

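/* The freed-Seg list invariants, in brief (editor's summary of the
   code below): the list runs from freesegs_oldest to
   freesegs_youngest via 'nextfree', the youngest entry has
   nextfree == NULL, and nFreeSegs counts the entries:

      oldest -> ... -> youngest -> NULL     (nFreeSegs links)

   set_Seg_freed appends at the youngest end; get_Seg_for_malloc
   recycles from the oldest end once N_FREED_SEGS entries exist. */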

static SegGroup* new_SegGroup ( void ) {
   SegGroup* g = VG_(malloc)("pc.h_main.nTG.1", sizeof(SegGroup));
   VG_(memset)(g, 0, sizeof(*g));
   return g;
}

/* Get a completely new Seg */
static Seg* new_Seg ( void )
{
   Seg*      seg;
   SegGroup* g;
   if (group_list == NULL) {
      g = new_SegGroup();
      g->admin = NULL;
      group_list = g;
   }
   tl_assert(group_list->nextfree <= N_SEGS_PER_GROUP);
   if (group_list->nextfree == N_SEGS_PER_GROUP) {
      g = new_SegGroup();
      g->admin = group_list;
      group_list = g;
   }
   tl_assert(group_list->nextfree < N_SEGS_PER_GROUP);
   seg = &group_list->segs[ group_list->nextfree ];
   group_list->nextfree++;
   stats__segs_allocd++;
   return seg;
}

static Seg* get_Seg_for_malloc ( void )
{
   Seg* seg;
   if (nFreeSegs < N_FREED_SEGS) {
      seg = new_Seg();
      seg->nextfree = (Seg*)1;
      return seg;
   }
   /* else recycle the oldest Seg in the free list */
   tl_assert(freesegs_youngest);
   tl_assert(freesegs_oldest);
   tl_assert(freesegs_youngest != freesegs_oldest);
   seg = freesegs_oldest;
   freesegs_oldest = seg->nextfree;
   nFreeSegs--;
   seg->nextfree = (Seg*)1;
   stats__segs_recycled++;
   return seg;
}

static void set_Seg_freed ( Seg* seg )
{
   tl_assert(seg);
   tl_assert(!Seg__is_freed(seg));
   if (nFreeSegs == 0) {
      tl_assert(freesegs_oldest == NULL);
      tl_assert(freesegs_youngest == NULL);
      seg->nextfree = NULL;
      freesegs_youngest = seg;
      freesegs_oldest = seg;
      nFreeSegs++;
   } else {
      tl_assert(freesegs_youngest);
      tl_assert(freesegs_oldest);
      if (nFreeSegs == 1) {
         tl_assert(freesegs_youngest == freesegs_oldest);
      } else {
         tl_assert(freesegs_youngest != freesegs_oldest);
      }
      tl_assert(freesegs_youngest->nextfree == NULL);
      tl_assert(seg != freesegs_youngest && seg != freesegs_oldest);
      seg->nextfree = NULL;
      freesegs_youngest->nextfree = seg;
      freesegs_youngest = seg;
      nFreeSegs++;
   }
}
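
/* Example (illustrative only) of how set_Seg_freed and
   get_Seg_for_malloc cooperate.  Suppose Segs A, B, C are freed in
   that order:

      free A:   oldest == A == youngest,  A->nextfree == NULL
      free B:   oldest == A, A->nextfree == B == youngest
      free C:   oldest == A, B->nextfree == C == youngest

   Once nFreeSegs reaches N_FREED_SEGS, the next malloc recycles A
   (the oldest), so recently-freed Segs -- the ones most likely to
   appear in error reports -- survive the longest. */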

static WordFM* addr_to_seg_map = NULL; /* GuestAddr -> Seg* */

static void addr_to_seg_map_ENSURE_INIT ( void )
{
   if (UNLIKELY(addr_to_seg_map == NULL)) {
      addr_to_seg_map = VG_(newFM)( VG_(malloc), "pc.h_main.attmEI.1",
                                    VG_(free), NULL/*unboxedcmp*/ );
   }
}

static Seg* find_Seg_by_addr ( Addr ga )
{
   UWord keyW, valW;
   addr_to_seg_map_ENSURE_INIT();
   if (VG_(lookupFM)( addr_to_seg_map, &keyW, &valW, (UWord)ga )) {
      tl_assert(keyW == ga);
      return (Seg*)valW;
   } else {
      return NULL;
   }
}

static void bind_addr_to_Seg ( Addr ga, Seg* seg )
{
   Bool b;
   addr_to_seg_map_ENSURE_INIT();
   b = VG_(addToFM)( addr_to_seg_map, (UWord)ga, (UWord)seg );
   tl_assert(!b); /* else ga is already bound */
}

static void unbind_addr_from_Seg ( Addr ga )
{
   Bool b;
   UWord keyW, valW;
   addr_to_seg_map_ENSURE_INIT();
   b = VG_(delFromFM)( addr_to_seg_map, &keyW, &valW, (UWord)ga );
   tl_assert(b); /* else ga was not already bound */
   tl_assert(keyW == ga);
   tl_assert(valW != 0);
}
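
/* Usage sketch (hypothetical addresses, for illustration): a malloc
   of 16 bytes at 0x5000 does bind_addr_to_Seg(0x5000, seg); a later
   free(0x5000) finds the Seg via find_Seg_by_addr(0x5000) and then
   does unbind_addr_from_Seg(0x5000).  Only block start addresses are
   keys: find_Seg_by_addr(0x5004) returns NULL even though 0x5004 is
   inside the block. */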


//////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////

// Returns the added heap segment
static Seg* add_new_segment ( ThreadId tid, Addr p, SizeT size )
{
   Seg* seg = get_Seg_for_malloc();
   tl_assert(seg != (Seg*)1); /* since we're using 1 as a special value */
   seg->addr = p;
   seg->szB  = size;
   seg->ec   = VG_(record_ExeContext)( tid, 0/*first_ip_delta*/ );
   tl_assert(!Seg__is_freed(seg));

   bind_addr_to_Seg(p, seg);

   return seg;
}


static
void* alloc_and_new_mem_heap ( ThreadId tid,
                               SizeT size, SizeT alignment, Bool is_zeroed )
{
   Addr p;

   if ( ((SSizeT)size) < 0) return NULL;

   p = (Addr)VG_(cli_malloc)(alignment, size);
   if (p == 0) /* allocation failed; don't zero or record a Seg */
      return NULL;
   if (is_zeroed) VG_(memset)((void*)p, 0, size);

   add_new_segment( tid, p, size );

   stats__client_mallocs++;
   return (void*)p;
}

static void die_and_free_mem_heap ( ThreadId tid, Seg* seg )
{
   // Free the actual block
   tl_assert(!Seg__is_freed(seg));

   VG_(cli_free)( (void*)seg->addr );

   // Remember where it was freed
   seg->ec = VG_(record_ExeContext)( tid, 0/*first_ip_delta*/ );

   set_Seg_freed(seg);
   unbind_addr_from_Seg( seg->addr );

   stats__client_frees++;
}

static void handle_free_heap( ThreadId tid, void* p )
{
   Seg* seg = find_Seg_by_addr( (Addr)p );
   if (!seg) {
      /* freeing a block that wasn't malloc'd.  Ignore. */
      return;
   }
   die_and_free_mem_heap( tid, seg );
}


/*------------------------------------------------------------*/
/*--- malloc() et al replacements                          ---*/
/*------------------------------------------------------------*/

void* h_replace_malloc ( ThreadId tid, SizeT n )
{
   return alloc_and_new_mem_heap ( tid, n, VG_(clo_alignment),
                                   /*is_zeroed*/False );
}

void* h_replace___builtin_new ( ThreadId tid, SizeT n )
{
   return alloc_and_new_mem_heap ( tid, n, VG_(clo_alignment),
                                   /*is_zeroed*/False );
}

void* h_replace___builtin_vec_new ( ThreadId tid, SizeT n )
{
   return alloc_and_new_mem_heap ( tid, n, VG_(clo_alignment),
                                   /*is_zeroed*/False );
}

void* h_replace_memalign ( ThreadId tid, SizeT align, SizeT n )
{
   return alloc_and_new_mem_heap ( tid, n, align,
                                   /*is_zeroed*/False );
}

void* h_replace_calloc ( ThreadId tid, SizeT nmemb, SizeT size1 )
{
   // Guard against overflow in nmemb * size1, which would otherwise
   // produce an under-sized block.  (The (SSizeT)size < 0 check in
   // alloc_and_new_mem_heap does not catch all wrapped products.)
   if (size1 != 0 && nmemb > ((SizeT)-1) / size1)
      return NULL;
   return alloc_and_new_mem_heap ( tid, nmemb*size1, VG_(clo_alignment),
                                   /*is_zeroed*/True );
}

void h_replace_free ( ThreadId tid, void* p )
{
   // Should arguably check here if p.vseg matches the segID of the
   // pointed-to block... unfortunately, by this stage, we don't know what
   // p.vseg is, because we don't know the address of p (the p here is a
   // copy, and we've lost the address of its source).  To do so would
   // require passing &p in, which would require rewriting part of
   // vg_replace_malloc.c... argh.
   //
   // However, Memcheck does free checking, and will catch almost all
   // violations this checking would have caught.  (It would only miss
   // the unlikely case where an unrelated pointer happens to point at
   // the very start of some heap block.)  So we haven't lost much.

   handle_free_heap(tid, p);
}

void h_replace___builtin_delete ( ThreadId tid, void* p )
{
   handle_free_heap(tid, p);
}

void h_replace___builtin_vec_delete ( ThreadId tid, void* p )
{
   handle_free_heap(tid, p);
}

void* h_replace_realloc ( ThreadId tid, void* p_old, SizeT new_size )
{
   Seg* seg;

   /* First try and find the block. */
   seg = find_Seg_by_addr( (Addr)p_old );
   if (!seg)
      return NULL;

   tl_assert(seg->addr == (Addr)p_old);

   if (new_size <= seg->szB) {
      /* new size is the same or smaller: allocate, copy new_size
         bytes from old to new */
      Addr p_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);
      VG_(memcpy)((void*)p_new, p_old, new_size);

      /* Free old memory */
      die_and_free_mem_heap( tid, seg );

      /* This has to be after die_and_free_mem_heap, otherwise the
         former succeeds in shorting out the new block, not the
         old, in the case when both are on the same list.  */
      add_new_segment ( tid, p_new, new_size );

      return (void*)p_new;
   } else {
      /* new size is bigger: allocate, copy seg->szB bytes from old
         to new */
      Addr p_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);
      VG_(memcpy)((void*)p_new, p_old, seg->szB);

      /* Free old memory */
      die_and_free_mem_heap( tid, seg );

      /* This has to be after die_and_free_mem_heap, otherwise the
         former succeeds in shorting out the new block, not the old,
         in the case when both are on the same list.  NB jrs
         2008-Sept-11: not sure if this comment is valid/correct any
         more -- I suspect not. */
      add_new_segment ( tid, p_new, new_size );

      return (void*)p_new;
   }
}
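
/* Design note (editor's observation): unlike a typical realloc, this
   implementation never resizes in place -- both branches allocate a
   fresh block, copy min(new_size, seg->szB) bytes, and free the old
   block.  That keeps the Seg bookkeeping simple: every live block has
   exactly one bind_addr_to_Seg binding, created at allocation time.
   A shrink from 64 to 16 bytes, say, therefore still moves the block
   and records a fresh ExeContext for it. */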

SizeT h_replace_malloc_usable_size ( ThreadId tid, void* p )
{
   Seg* seg = find_Seg_by_addr( (Addr)p );

   // There may be slop, but pretend there isn't because only the asked-for
   // area will have been shadowed properly.
   return ( seg ? seg->szB : 0 );
}


/*--------------------------------------------------------------------*/
/*--- Instrumentation                                              ---*/
/*--------------------------------------------------------------------*/

/* The h_ instrumenter that follows is complex, since it deals with
   shadow value computation.

   It also needs to generate instrumentation for the sg_ side of
   things.  That's relatively straightforward.  However, rather than
   confuse the code herein any further, we simply delegate the problem
   to sg_main.c, by using the four functions
   sg_instrument_{init,fini,IRStmt,final_jump}.  These four completely
   abstract away the sg_ instrumentation.  See comments in sg_main.c's
   instrumentation section for further details. */


/* Carries info about a particular tmp.  The tmp's number is not
   recorded, as this is implied by (equal to) its index in the tmpMap
   in PCEnv.  The tmp's type is also not recorded, as this is present
   in PCEnv.sb->tyenv.

   When .kind is NonShad, .shadow may give the identity of the temp
   currently holding the associated shadow value, or it may be
   IRTemp_INVALID if code to compute the shadow has not yet been
   emitted.

   When .kind is Shad, the tmp holds a shadow value, and so .shadow
   must be IRTemp_INVALID, since it is illogical for a shadow tmp
   itself to be shadowed.
*/
typedef
   enum { NonShad=1, Shad=2 }
   TempKind;

typedef
   struct {
      TempKind kind;
      IRTemp   shadow;
   }
   TempMapEnt;
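
/* Example tmpMap state (illustrative only), for an IRSB with four
   temps where only t1 (a guest-word-sized temp) has had shadow code
   emitted, its shadow living in t3:

      index  kind     shadow
      t0     NonShad  IRTemp_INVALID   (no shadow computed yet)
      t1     NonShad  t3               (shadowed by t3)
      t2     NonShad  IRTemp_INVALID
      t3     Shad     IRTemp_INVALID   (shadows are never shadowed)
*/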


/* Carries around state during Ptrcheck instrumentation. */
typedef
   struct {
      /* MODIFIED: the superblock being constructed.  IRStmts are
         added. */
      IRSB* sb;
      Bool  trace;

      /* MODIFIED: a table [0 .. #temps_in_sb-1] which gives the
         current kind and possibly shadow temp for each temp in the
         IRSB being constructed.  Note that it does not contain the
         type of each tmp.  If you want to know the type, look at the
         relevant entry in sb->tyenv.  It follows that at all times
         during the instrumentation process, the valid indices for
         tmpMap and sb->tyenv are identical, being 0 .. N-1, where N
         is the total number of NonShad and Shad temps allocated so
         far.

         The reason for this strange split (types in one place, all
         other info in another) is that we need the types to be
         attached to sb so as to make it possible to do
         "typeOfIRExpr(pce->sb->tyenv, ...)" at various places in the
         instrumentation process.

         Note that only integer temps of the guest word size are
         shadowed, since it is impossible (or meaningless) to hold a
         pointer in any other type of temp. */
      XArray* /* of TempMapEnt */ tmpMap;

      /* READONLY: the host word type.  Needed for constructing
         arguments of type 'HWord' to be passed to helper functions.
         Ity_I32 or Ity_I64 only. */
      IRType hWordTy;

      /* READONLY: the guest word type, Ity_I32 or Ity_I64 only. */
      IRType gWordTy;

      /* READONLY: the guest state size, so we can generate shadow
         offsets correctly. */
      Int guest_state_sizeB;
   }
   PCEnv;

/* SHADOW TMP MANAGEMENT.  Shadow tmps are allocated lazily (on
   demand), as they are encountered.  This is for two reasons.

   (1) (less important reason): Many original tmps are unused due to
   initial IR optimisation, and we do not want to waste space in
   tables tracking them.

   Shadow IRTemps are therefore allocated on demand.  pce.tmpMap is a
   table indexed [0 .. n_types-1], which gives the current shadow for
   each original tmp, or IRTemp_INVALID if none is so far assigned.
   It is necessary to support making multiple assignments to a shadow
   -- specifically, after testing a shadow for definedness, it needs
   to be made defined.  But IR's SSA property disallows this.

   (2) (more important reason): Therefore, when a shadow needs to get
   a new value, a new temporary is created, the value is assigned to
   that, and the tmpMap is updated to reflect the new binding.

   A corollary is that if the tmpMap maps a given tmp to
   IRTemp_INVALID and we are hoping to read that shadow tmp, it means
   there's a read-before-write error in the original tmps.  The IR
   sanity checker should catch all such anomalies, however.
*/
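
/* Rebinding example (illustrative only): IR's SSA form forbids
   assigning to a tmp twice, so "updating" t1's shadow means making a
   new Shad tmp and repointing the map:

      t9  = <initial shadow value of t1>   // tmpMap[t1].shadow == t9
      t12 = <corrected shadow value>       // tmpMap[t1].shadow == t12

   Readers of t1's shadow always go through tmpMap, so they pick up
   the newest binding automatically. */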

/* Create a new IRTemp of type 'ty' and kind 'kind', and add it to
   both the table in pce->sb and to our auxiliary mapping.  Note that
   newTemp may cause pce->tmpMap to resize, hence previous results
   from VG_(indexXA)(pce->tmpMap) are invalidated. */
static IRTemp newTemp ( PCEnv* pce, IRType ty, TempKind kind )
{
   Word       newIx;
   TempMapEnt ent;
   IRTemp     tmp = newIRTemp(pce->sb->tyenv, ty);
   ent.kind   = kind;
   ent.shadow = IRTemp_INVALID;
   newIx = VG_(addToXA)( pce->tmpMap, &ent );
   tl_assert(newIx == (Word)tmp);
   return tmp;
}
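
/* Usage sketch (hypothetical, not code from this file): to lazily
   create a shadow for an original tmp 'orig' of guest word type:

      TempMapEnt* ent = (TempMapEnt*)VG_(indexXA)( pce->tmpMap,
                                                   (Word)orig );
      if (ent->shadow == IRTemp_INVALID) {
         IRTemp sh = newTemp( pce, pce->gWordTy, Shad );
         // re-index: newTemp may have resized tmpMap
         ent = (TempMapEnt*)VG_(indexXA)( pce->tmpMap, (Word)orig );
         ent->shadow = sh;
      }

   Note the re-index after newTemp, per the warning above. */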

/*------------------------------------------------------------*/
/*--- Constructing IR fragments                            ---*/
/*------------------------------------------------------------*/

/* add stmt to a bb */
static /*inline*/ void stmt ( HChar cat, PCEnv* pce, IRStmt* st ) {
   if (pce->trace) {
      VG_(printf)("  %c: ", cat);
      ppIRStmt(st);
      VG_(printf)("\n");
   }
   addStmtToIRSB(pce->sb, st);
}

static IRTemp for_sg__newIRTemp_cb ( IRType ty, void* opaque )
{
   PCEnv* pce = (PCEnv*)opaque;
   return newTemp( pce, ty, NonShad );
}


IRSB* h_instrument ( VgCallbackClosure* closure,
                     IRSB* sbIn,
                     VexGuestLayout* layout,
                     VexGuestExtents* vge,
                     IRType gWordTy, IRType hWordTy )
{
   Bool  verboze = 0||False;
   Int   i;
   PCEnv pce;
   struct _SGEnv* sgenv;

   if (gWordTy != hWordTy) {
      /* We don't currently support this case. */
      VG_(tool_panic)("host/guest word size mismatch");
   }

   /* Check we're not completely nuts */
   tl_assert(sizeof(UWord)  == sizeof(void*));
   tl_assert(sizeof(Word)   == sizeof(void*));
   tl_assert(sizeof(Addr)   == sizeof(void*));
   tl_assert(sizeof(ULong)  == 8);
   tl_assert(sizeof(Long)   == 8);
   tl_assert(sizeof(Addr64) == 8);
   tl_assert(sizeof(UInt)   == 4);
   tl_assert(sizeof(Int)    == 4);

   /* Set up the running environment.  Both .sb and .tmpMap are
      modified as we go along.  Note that tmps are added to both
      .sb->tyenv and .tmpMap together, so the valid index-set for
      those two arrays should always be identical. */
   VG_(memset)(&pce, 0, sizeof(pce));
   pce.sb                = deepCopyIRSBExceptStmts(sbIn);
   pce.trace             = verboze;
   pce.hWordTy           = hWordTy;
   pce.gWordTy           = gWordTy;
   pce.guest_state_sizeB = layout->total_sizeB;

   pce.tmpMap = VG_(newXA)( VG_(malloc), "pc.h_instrument.1", VG_(free),
                            sizeof(TempMapEnt));
   for (i = 0; i < sbIn->tyenv->types_used; i++) {
      TempMapEnt ent;
      ent.kind   = NonShad;
      ent.shadow = IRTemp_INVALID;
      VG_(addToXA)( pce.tmpMap, &ent );
   }
   tl_assert( VG_(sizeXA)( pce.tmpMap ) == sbIn->tyenv->types_used );

   /* Also set up for the sg_ instrumenter.  See comments at the top
      of this instrumentation section for details.  The two parameters
      constitute a closure, which sg_ can use to correctly generate
      new IRTemps as needed. */
   sgenv = sg_instrument_init( for_sg__newIRTemp_cb,
                               (void*)&pce );

   /* Copy verbatim any IR preamble preceding the first IMark */

   i = 0;
   while (i < sbIn->stmts_used && sbIn->stmts[i]->tag != Ist_IMark) {
      IRStmt* st = sbIn->stmts[i];
      tl_assert(st);
      tl_assert(isFlatIRStmt(st));
      stmt( 'C', &pce, sbIn->stmts[i] );
      i++;
   }

   /* Iterate over the remaining stmts to generate instrumentation. */

   tl_assert(sbIn->stmts_used > 0);
   tl_assert(i >= 0);
   tl_assert(i < sbIn->stmts_used);
   tl_assert(sbIn->stmts[i]->tag == Ist_IMark);

   for (/*use current i*/; i < sbIn->stmts_used; i++) {
      /* generate sg_ instrumentation for this stmt */
      sg_instrument_IRStmt( sgenv, pce.sb, sbIn->stmts[i],
                            layout, gWordTy, hWordTy );

      stmt( 'C', &pce, sbIn->stmts[i] );
   }

   /* generate sg_ instrumentation for the final jump */
   sg_instrument_final_jump( sgenv, pce.sb, sbIn->next, sbIn->jumpkind,
                             layout, gWordTy, hWordTy );

   /* and finalise .. */
   sg_instrument_fini( sgenv );

   /* If this fails, there's been some serious snafu with tmp management,
      that should be investigated. */
   tl_assert( VG_(sizeXA)( pce.tmpMap ) == pce.sb->tyenv->types_used );
   VG_(deleteXA)( pce.tmpMap );

   return pce.sb;
}


/*--------------------------------------------------------------------*/
/*--- Finalisation                                                 ---*/
/*--------------------------------------------------------------------*/

void h_fini ( Int exitcode )
{
   if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)) {
      VG_(message)(Vg_UserMsg,
                   "For counts of detected and suppressed errors, "
                   "rerun with: -v\n");
   }

   if (VG_(clo_stats)) {
      VG_(message)(Vg_DebugMsg,
                   "  h_:  %'10llu client allocs, %'10llu client frees\n",
                   stats__client_mallocs, stats__client_frees);
      VG_(message)(Vg_DebugMsg,
                   "  h_:  %'10llu Segs allocd,   %'10llu Segs recycled\n",
                   stats__segs_allocd, stats__segs_recycled);
   }
}


/*--------------------------------------------------------------------*/
/*--- end                                                 h_main.c ---*/
/*--------------------------------------------------------------------*/