/*--------------------------------------------------------------------*/
/*--- The leak checker.                             mc_leakcheck.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors.

   Copyright (C) 2000-2006 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "pub_tool_basics.h"
#include "pub_tool_vki.h"
#include "pub_tool_aspacemgr.h"
#include "pub_tool_execontext.h"
#include "pub_tool_hashtable.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_libcsignal.h"
#include "pub_tool_machine.h"
#include "pub_tool_mallocfree.h"
#include "pub_tool_options.h"
#include "pub_tool_signals.h"
#include "pub_tool_tooliface.h"     // Needed for mc_include.h

#include "mc_include.h"

#include <setjmp.h>                 // For jmp_buf


/* Define to debug the memory-leak-detector. */
#define VG_DEBUG_LEAKCHECK 0
#define VG_DEBUG_CLIQUE    0

/*------------------------------------------------------------*/
/*--- Low-level address-space scanning, for the leak       ---*/
/*--- detector.                                            ---*/
/*------------------------------------------------------------*/

static
jmp_buf memscan_jmpbuf;


static
void scan_all_valid_memory_catcher ( Int sigNo, Addr addr )
{
   if (0)
      VG_(printf)("OUCH! sig=%d addr=%p\n", sigNo, addr);
   if (sigNo == VKI_SIGSEGV || sigNo == VKI_SIGBUS)
      __builtin_longjmp(memscan_jmpbuf, 1);
}
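
/* Note on the protocol between this catcher and the scanning loop in
   lc_scan_memory_WRK below: the scanner installs the catcher and does
   __builtin_setjmp(memscan_jmpbuf) before dereferencing each word, so
   a SIGSEGV or SIGBUS raised by touching an unmapped address longjmps
   straight back into the scan loop, which then skips to the next page
   rather than crashing. */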


/* TODO: GIVE THIS A PROPER HOME
   TODO: MERGE THIS WITH DUPLICATE IN m_main.c and coredump-elf.c.
   Extract from aspacem a vector of the current segment start
   addresses.  The vector is dynamically allocated and should be freed
   by the caller when done.  REQUIRES m_mallocfree to be running.
   Writes the number of addresses acquired into *n_acquired. */

static Addr* get_seg_starts ( /*OUT*/Int* n_acquired )
{
   Addr* starts;
   Int   n_starts, r = 0;

   n_starts = 1;
   while (True) {
      starts = VG_(malloc)( n_starts * sizeof(Addr) );
      if (starts == NULL)
         break;
      r = VG_(am_get_segment_starts)( starts, n_starts );
      if (r >= 0)
         break;
      VG_(free)(starts);
      n_starts *= 2;
   }

   if (starts == NULL) {
      *n_acquired = 0;
      return NULL;
   }

   *n_acquired = r;
   return starts;
}
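
/* Usage sketch (mirroring the call in MC_(do_detect_memory_leaks)
   below):
      Int   n_seg_starts;
      Addr* seg_starts = get_seg_starts( &n_seg_starts );
      ... walk seg_starts[0 .. n_seg_starts-1] ...
      VG_(free)(seg_starts);
*/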


/*------------------------------------------------------------*/
/*--- Detecting leaked (unreachable) malloc'd blocks.      ---*/
/*------------------------------------------------------------*/

/* An entry in the mark stack */
typedef
   struct {
      Int   next:30;    /* Index of next in mark stack */
      UInt  state:2;    /* Reachedness */
      SizeT indirect;   /* if Unreached, how much is unreachable from here */
   }
   MarkStack;
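
/* The mark stack is intrusive: lc_markstack_top (declared below) holds
   the index of the topmost entry, and each entry's .next field holds
   the index of the entry beneath it, with -1 terminating the chain.
   For example, after pushing blocks 4 and then 7, lc_markstack_top is
   7, lc_markstack[7].next is 4 and lc_markstack[4].next is -1. */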

/* Find the i such that ptr points at or inside the block described by
   shadows[i].  Return -1 if none found.  This assumes that shadows[]
   has been sorted on the ->data field. */
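
/* A worked example, with illustrative addresses only: given three
   sorted blocks
      shadows[0]: data=0x1000, szB=16   covering [0x1000, 0x1010)
      shadows[1]: data=0x2000, szB=0    treated as [0x2000, 0x2001)
      shadows[2]: data=0x3000, szB=8    covering [0x3000, 0x3008)
   find_shadow_for(0x1008,..) == 0, find_shadow_for(0x2000,..) == 1
   (zero-sized blocks are special-cased below), and
   find_shadow_for(0x2800,..) == -1. */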

#if VG_DEBUG_LEAKCHECK
/* Used to sanity-check the fast binary-search mechanism. */
static
Int find_shadow_for_OLD ( Addr       ptr,
                          MC_Chunk** shadows,
                          Int        n_shadows )
{
   Int  i;
   Addr a_lo, a_hi;
   PROF_EVENT(70, "find_shadow_for_OLD");
   for (i = 0; i < n_shadows; i++) {
      PROF_EVENT(71, "find_shadow_for_OLD(loop)");
      a_lo = shadows[i]->data;
      a_hi = ((Addr)shadows[i]->data) + shadows[i]->szB;
      if (a_lo <= ptr && ptr < a_hi)
         return i;
   }
   return -1;
}
#endif


static
Int find_shadow_for ( Addr       ptr,
                      MC_Chunk** shadows,
                      Int        n_shadows )
{
   Addr a_mid_lo, a_mid_hi;
   Int lo, mid, hi, retVal;
   /* VG_(printf)("find shadow for %p = ", ptr); */
   retVal = -1;
   lo = 0;
   hi = n_shadows-1;
   while (True) {
      /* invariant: current unsearched space is from lo to hi, inclusive. */
      if (lo > hi) break; /* not found */

      mid      = (lo + hi) / 2;
      a_mid_lo = shadows[mid]->data;
      a_mid_hi = shadows[mid]->data + shadows[mid]->szB;
      /* Extent of block 'mid' is [a_mid_lo .. a_mid_hi).
         Special-case zero-sized blocks - treat them as if they had
         size 1.  Not doing so causes them to cover no address range
         at all, so they would never be identified as the target of
         any pointer, and hence would be incorrectly reported as
         definitely leaked. */
      if (shadows[mid]->szB == 0)
         a_mid_hi++;

      if (ptr < a_mid_lo) {
         hi = mid-1;
         continue;
      }
      if (ptr >= a_mid_hi) {
         lo = mid+1;
         continue;
      }
      tl_assert(ptr >= a_mid_lo && ptr < a_mid_hi);
      retVal = mid;
      break;
   }

#  if VG_DEBUG_LEAKCHECK
   tl_assert(retVal == find_shadow_for_OLD ( ptr, shadows, n_shadows ));
#  endif
   /* VG_(printf)("%d\n", retVal); */
   return retVal;
}

/* Globals for the following callbacks, used by MC_(do_detect_memory_leaks). */
static MC_Chunk** lc_shadows;
static Int        lc_n_shadows;
static MarkStack* lc_markstack;
static Int        lc_markstack_top;
static Addr       lc_min_mallocd_addr;
static Addr       lc_max_mallocd_addr;
static SizeT      lc_scanned;

static Bool       (*lc_is_within_valid_secondary) (Addr addr);
static Bool       (*lc_is_valid_aligned_word)     (Addr addr);


SizeT MC_(bytes_leaked)     = 0;
SizeT MC_(bytes_indirect)   = 0;
SizeT MC_(bytes_dubious)    = 0;
SizeT MC_(bytes_reachable)  = 0;
SizeT MC_(bytes_suppressed) = 0;

static Int lc_compar(void* n1, void* n2)
{
   MC_Chunk* mc1 = *(MC_Chunk**)n1;
   MC_Chunk* mc2 = *(MC_Chunk**)n2;
   return (mc1->data < mc2->data ? -1 : 1);
}

/* If ptr is pointing to a heap-allocated block which hasn't been seen
   before, push it onto the mark stack.  Clique is the index of the
   clique leader; -1 if none. */
static void lc_markstack_push_WRK(Addr ptr, Int clique)
{
   Int sh_no;

   /* quick filter */
   if (!VG_(am_is_valid_for_client)(ptr, 1, VKI_PROT_NONE))
      return;

   sh_no = find_shadow_for(ptr, lc_shadows, lc_n_shadows);

   if (VG_DEBUG_LEAKCHECK)
      VG_(printf)("ptr=%p -> block %d\n", ptr, sh_no);

   if (sh_no == -1)
      return;

   tl_assert(sh_no >= 0 && sh_no < lc_n_shadows);
   tl_assert(ptr >= lc_shadows[sh_no]->data);
   tl_assert(ptr < lc_shadows[sh_no]->data
                   + lc_shadows[sh_no]->szB
                   + (lc_shadows[sh_no]->szB==0  ? 1  : 0));

   if (lc_markstack[sh_no].state == Unreached) {
      if (0)
         VG_(printf)("pushing %p-%p\n", lc_shadows[sh_no]->data,
                     lc_shadows[sh_no]->data + lc_shadows[sh_no]->szB);

      tl_assert(lc_markstack[sh_no].next == -1);
      lc_markstack[sh_no].next = lc_markstack_top;
      lc_markstack_top = sh_no;
   }

   tl_assert(clique >= -1 && clique < lc_n_shadows);

   if (clique != -1) {
      if (0)
         VG_(printf)("mopup: %d: %p is %d\n",
                     sh_no, lc_shadows[sh_no]->data, lc_markstack[sh_no].state);

      /* An unmarked block - add it to the clique.  Add its size to
         the clique-leader's indirect size.  If the new block was
         itself a clique leader, it isn't any more, so add its
         indirect to the new clique leader.

         If this block *is* the clique leader, it means this is a
         cyclic structure, so none of this applies. */
      if (lc_markstack[sh_no].state == Unreached) {
         lc_markstack[sh_no].state = IndirectLeak;

         if (sh_no != clique) {
            if (VG_DEBUG_CLIQUE) {
               if (lc_markstack[sh_no].indirect)
                  VG_(printf)("  clique %d joining clique %d adding %d+%d bytes\n",
                              sh_no, clique,
                              lc_shadows[sh_no]->szB, lc_markstack[sh_no].indirect);
               else
                  VG_(printf)("  %d joining %d adding %d\n",
                              sh_no, clique, lc_shadows[sh_no]->szB);
            }

            lc_markstack[clique].indirect += lc_shadows[sh_no]->szB;
            lc_markstack[clique].indirect += lc_markstack[sh_no].indirect;
            lc_markstack[sh_no].indirect = 0; /* shouldn't matter */
         }
      }
   } else if (ptr == lc_shadows[sh_no]->data) {
      lc_markstack[sh_no].state = Proper;
   } else {
      if (lc_markstack[sh_no].state == Unreached)
         lc_markstack[sh_no].state = Interior;
   }
}
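
/* An illustrative clique walk: suppose block A is unreachable and
   holds the only pointers to blocks B and C, and B points back at A.
   full_report() pushes A with clique == index(A); scanning A then
   pushes B and C with the same clique, so both become IndirectLeak and
   their sizes (plus any .indirect they accumulated as leaders of
   earlier cliques) are added to A's .indirect.  When B's back-pointer
   to A is scanned, A is no longer Unreached, so nothing further
   happens and A remains the clique leader. */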

static void lc_markstack_push(Addr ptr)
{
   lc_markstack_push_WRK(ptr, -1);
}

/* Pop the top of the mark stack; return its index, or -1 if the stack
   is empty. */
static Int lc_markstack_pop(void)
{
   Int ret = lc_markstack_top;

   if (ret != -1) {
      lc_markstack_top = lc_markstack[ret].next;
      lc_markstack[ret].next = -1;
   }

   return ret;
}
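
/* Push and pop together give the worklist loop used by
   lc_do_leakcheck below:
      while ((top = lc_markstack_pop()) != -1)
         ... scan the block lc_shadows[top] for further pointers ...
*/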


/* Scan a block of memory between [start, start+len).  This range may
   be bogus, inaccessible, or otherwise strange; we deal with it.

   If clique != -1, it means we're gathering leaked memory into
   cliques, and clique is the index of the current clique leader. */
static void lc_scan_memory_WRK(Addr start, SizeT len, Int clique)
{
   Addr ptr = VG_ROUNDUP(start, sizeof(Addr));
   Addr end = VG_ROUNDDN(start+len, sizeof(Addr));
   vki_sigset_t sigmask;

   if (VG_DEBUG_LEAKCHECK)
      VG_(printf)("scan %p-%p\n", start, start+len);
   VG_(sigprocmask)(VKI_SIG_SETMASK, NULL, &sigmask);
   VG_(set_fault_catcher)(scan_all_valid_memory_catcher);

   //   lc_scanned += end-ptr;

   if (!VG_(am_is_valid_for_client)(ptr, sizeof(Addr), VKI_PROT_READ))
      ptr = VG_PGROUNDUP(ptr+1);        /* first page bad */

   while (ptr < end) {
      Addr addr;

      /* Skip invalid chunks */
      if (!(*lc_is_within_valid_secondary)(ptr)) {
         ptr = VG_ROUNDUP(ptr+1, SM_SIZE);
         continue;
      }

      /* Look to see if this page seems reasonable */
      if ((ptr % VKI_PAGE_SIZE) == 0) {
         if (!VG_(am_is_valid_for_client)(ptr, sizeof(Addr), VKI_PROT_READ))
            ptr += VKI_PAGE_SIZE; /* bad page - skip it */
      }

      if (__builtin_setjmp(memscan_jmpbuf) == 0) {
         if ((*lc_is_valid_aligned_word)(ptr)) {
            lc_scanned += sizeof(Addr);
            addr = *(Addr *)ptr;
            lc_markstack_push_WRK(addr, clique);
         } else if (0 && VG_DEBUG_LEAKCHECK)
            VG_(printf)("%p not valid\n", ptr);
         ptr += sizeof(Addr);
      } else {
         /* We need to restore the signal mask, because we were
            longjmped out of a signal handler. */
         VG_(sigprocmask)(VKI_SIG_SETMASK, &sigmask, NULL);

         ptr = VG_PGROUNDUP(ptr+1);     /* bad page - skip it */
      }
   }

   VG_(sigprocmask)(VKI_SIG_SETMASK, &sigmask, NULL);
   VG_(set_fault_catcher)(NULL);
}
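
/* Note that only Addr-aligned words are examined: start is rounded up
   and start+len rounded down to sizeof(Addr), so pointers stored at
   unaligned addresses are not found.  E.g. on a 32-bit target,
   scanning [0x1002, 0x100e) examines just the words at 0x1004 and
   0x1008.  This is the usual conservative-scanning trade-off. */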


static void lc_scan_memory(Addr start, SizeT len)
{
   lc_scan_memory_WRK(start, len, -1);
}

/* Process the mark stack until empty.  If clique != -1, we're
   gathering leaked blocks into cliques, so anything reached from here
   is marked IndirectLeak and accounted to that clique leader. */
static void lc_do_leakcheck(Int clique)
{
   Int top;

   while ((top = lc_markstack_pop()) != -1) {
      tl_assert(top >= 0 && top < lc_n_shadows);
      tl_assert(lc_markstack[top].state != Unreached);

      lc_scan_memory_WRK(lc_shadows[top]->data, lc_shadows[top]->szB, clique);
   }
}

static SizeT blocks_leaked;
static SizeT blocks_indirect;
static SizeT blocks_dubious;
static SizeT blocks_reachable;
static SizeT blocks_suppressed;

static void full_report(ThreadId tid)
{
   Int         i;
   Int         n_lossrecords;
   LossRecord* errlist;
   LossRecord* p;
   Bool        is_suppressed;

   /* Go through and group lost structures into cliques.  For each
      Unreached block, push it onto the mark stack, and find all the
      blocks linked to it.  These are marked IndirectLeak, and their
      size is added to the clique leader's indirect size.  If one of
      the found blocks was itself a clique leader (from a previous
      pass), then the cliques are merged. */
   for (i = 0; i < lc_n_shadows; i++) {
      if (VG_DEBUG_CLIQUE)
         VG_(printf)("cliques: %d at %p -> Loss state %d\n",
                     i, lc_shadows[i]->data, lc_markstack[i].state);
      if (lc_markstack[i].state != Unreached)
         continue;

      tl_assert(lc_markstack_top == -1);

      if (VG_DEBUG_CLIQUE)
         VG_(printf)("%d: gathering clique %p\n", i, lc_shadows[i]->data);

      lc_markstack_push_WRK(lc_shadows[i]->data, i);

      lc_do_leakcheck(i);

      tl_assert(lc_markstack_top == -1);
      tl_assert(lc_markstack[i].state == IndirectLeak
                /* jrs 20051218: Ashley Pittman supplied a
                   custom-allocator test program which causes the ==
                   IndirectLeak condition to fail - it causes .state
                   to be Unreached.  Since I have no idea how this
                   clique stuff works and no time to figure it out,
                   just allow that condition too.  This could well be
                   a completely bogus fix.  It doesn't seem unsafe
                   given that in any case the .state field is
                   immediately overwritten by the next statement. */
                || lc_markstack[i].state == Unreached);

      lc_markstack[i].state = Unreached; /* Return to the unreached
                                            state, to indicate that
                                            it's a clique leader */
   }
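
   /* At this point every block is Proper, Interior or IndirectLeak,
      except for clique leaders, which have been returned to Unreached
      with .indirect totalling the bytes reachable only through them. */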

   /* Common up the lost blocks so we can print sensible error messages. */
   n_lossrecords = 0;
   errlist       = NULL;
   for (i = 0; i < lc_n_shadows; i++) {
      ExeContext* where = lc_shadows[i]->where;

      for (p = errlist; p != NULL; p = p->next) {
         if (p->loss_mode == lc_markstack[i].state
             && VG_(eq_ExeContext) ( MC_(clo_leak_resolution),
                                     p->allocated_at,
                                     where) ) {
            break;
         }
      }
      if (p != NULL) {
         p->num_blocks     ++;
         p->total_bytes    += lc_shadows[i]->szB;
         p->indirect_bytes += lc_markstack[i].indirect;
      } else {
         n_lossrecords ++;
         p = VG_(malloc)(sizeof(LossRecord));
         p->loss_mode      = lc_markstack[i].state;
         p->allocated_at   = where;
         p->total_bytes    = lc_shadows[i]->szB;
         p->indirect_bytes = lc_markstack[i].indirect;
         p->num_blocks     = 1;
         p->next           = errlist;
         errlist           = p;
      }
   }

   /* Print out the commoned-up blocks and collect summary stats. */
   for (i = 0; i < n_lossrecords; i++) {
      Bool        print_record;
      LossRecord* p_min = NULL;
      SizeT       n_min = ~(0x0L);
      for (p = errlist; p != NULL; p = p->next) {
         if (p->num_blocks > 0
             && p->total_bytes + p->indirect_bytes < n_min) {
            n_min = p->total_bytes + p->indirect_bytes;
            p_min = p;
         }
      }
      tl_assert(p_min != NULL);

      /* Ok to have tst==NULL;  it's only used if --gdb-attach=yes, and
         we disallow that when --leak-check=yes.

         Print the error if not suppressed, unless it's reachable (Proper
         or IndirectLeak) and --show-reachable=no. */
      print_record = ( MC_(clo_show_reachable) ||
                       Unreached == p_min->loss_mode ||
                       Interior == p_min->loss_mode );

      // Nb: because VG_(unique_error) does all the error processing
      // immediately, and doesn't save the error, leakExtra can be
      // stack-allocated.
      is_suppressed =
         MC_(record_leak_error) ( tid, i+1, n_lossrecords, p_min,
                                  print_record );

      if (is_suppressed) {
         blocks_suppressed     += p_min->num_blocks;
         MC_(bytes_suppressed) += p_min->total_bytes;

      } else if (Unreached == p_min->loss_mode) {
         blocks_leaked       += p_min->num_blocks;
         MC_(bytes_leaked)   += p_min->total_bytes;

      } else if (IndirectLeak == p_min->loss_mode) {
         blocks_indirect     += p_min->num_blocks;
         MC_(bytes_indirect) += p_min->total_bytes;

      } else if (Interior == p_min->loss_mode) {
         blocks_dubious     += p_min->num_blocks;
         MC_(bytes_dubious) += p_min->total_bytes;

      } else if (Proper == p_min->loss_mode) {
         blocks_reachable     += p_min->num_blocks;
         MC_(bytes_reachable) += p_min->total_bytes;

      } else {
         VG_(tool_panic)("generic_detect_memory_leaks: unknown loss mode");
      }
      p_min->num_blocks = 0;
   }
}

/* Compute a quick summary of the leak check. */
static void make_summary(void)
{
   Int i;

   for (i = 0; i < lc_n_shadows; i++) {
      SizeT size = lc_shadows[i]->szB;

      switch (lc_markstack[i].state) {
      case Unreached:
         blocks_leaked++;
         MC_(bytes_leaked) += size;
         break;

      case Proper:
         blocks_reachable++;
         MC_(bytes_reachable) += size;
         break;

      case Interior:
         blocks_dubious++;
         MC_(bytes_dubious) += size;
         break;

      case IndirectLeak:        /* shouldn't happen */
         blocks_indirect++;
         MC_(bytes_indirect) += size;
         break;
      }
   }
}

static MC_Chunk**
find_active_shadows(Int* n_shadows)
{
   /* Our goal is to construct a set of shadows that includes every
    * mempool chunk, and every malloc region that *doesn't* contain a
    * mempool chunk.  We do this in several phases.
    *
    * First we collect all the malloc chunks into an array and sort it.
    * We do this because we want to query the chunks by interior
    * pointers, requiring binary search.
    *
    * Second we build an array containing a Bool for each malloc chunk,
    * indicating whether it contains any mempool chunks.
    *
    * Third we loop over the mempool tables.  For each chunk in each
    * pool, we set the entry in the Bool array corresponding to the
    * malloc chunk containing the mempool chunk.
    *
    * Finally we copy the mempool chunks and the non-marked malloc
    * chunks into a combined array of shadows, free our temporaries,
    * and return the combined array.
    */
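
   /* Worked example (illustrative): suppose a custom allocator mallocs
      one arena and carves three mempool chunks out of it, and there is
      one ordinary malloc block besides.  Then n_mallocs == 2; each of
      the three pool chunks bumps *n_shadows, and the first also marks
      the arena's Bool entry, decrementing *n_shadows once.  We end
      with *n_shadows == 2 + 3 - 1 == 4: the three pool chunks plus the
      ordinary block, with the arena itself excluded. */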

   MC_Mempool *mp;
   MC_Chunk **mallocs, **shadows, *mc;
   UInt n_mallocs, s;
   Int m;
   Bool *malloc_chunk_holds_a_pool_chunk;

   mallocs = (MC_Chunk**) VG_(HT_to_array)( MC_(malloc_list), &n_mallocs );

   if (n_mallocs == 0) {
      tl_assert(mallocs == NULL);
      *n_shadows = 0;
      return NULL;
   }

   VG_(ssort)((void*)mallocs, n_mallocs,
              sizeof(VgHashNode*), lc_compar);

   malloc_chunk_holds_a_pool_chunk = VG_(calloc)( n_mallocs, sizeof(Bool) );

   *n_shadows = n_mallocs;

   VG_(HT_ResetIter)(MC_(mempool_list));
   while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
      VG_(HT_ResetIter)(mp->chunks);
      while ( (mc = VG_(HT_Next)(mp->chunks)) ) {

         /* We'll need a shadow for this chunk. */
         ++(*n_shadows);

         /* Possibly invalidate the malloc holding the beginning of this chunk. */
         m = find_shadow_for(mc->data, mallocs, n_mallocs);
         if (m != -1 && malloc_chunk_holds_a_pool_chunk[m] == False) {
            tl_assert(*n_shadows > 0);
            --(*n_shadows);
            malloc_chunk_holds_a_pool_chunk[m] = True;
         }

         /* Possibly invalidate the malloc holding the end of this chunk. */
         if (mc->szB > 1) {
            m = find_shadow_for(mc->data + (mc->szB - 1), mallocs, n_mallocs);
            if (m != -1 && malloc_chunk_holds_a_pool_chunk[m] == False) {
               tl_assert(*n_shadows > 0);
               --(*n_shadows);
               malloc_chunk_holds_a_pool_chunk[m] = True;
            }
         }
      }
   }

   tl_assert(*n_shadows > 0);
   shadows = VG_(malloc)(sizeof(VgHashNode*) * (*n_shadows));
   s = 0;

   /* Copy the mempool chunks into the final array. */
   VG_(HT_ResetIter)(MC_(mempool_list));
   while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
      VG_(HT_ResetIter)(mp->chunks);
      while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
         tl_assert(s < *n_shadows);
         shadows[s++] = mc;
      }
   }

   /* Copy the malloc chunks into the final array. */
   for (m = 0; m < n_mallocs; ++m) {
      if (!malloc_chunk_holds_a_pool_chunk[m]) {
         tl_assert(s < *n_shadows);
         shadows[s++] = mallocs[m];
      }
   }

   tl_assert(s == *n_shadows);
   VG_(free)(mallocs);
   VG_(free)(malloc_chunk_holds_a_pool_chunk);

   return shadows;
}


/* Top level entry point to leak detector.  Call here, passing in
   suitable address-validating functions (see the scanning machinery
   in lc_scan_memory_WRK above).  These functions used to encapsulate
   the differences between Memcheck and Addrcheck; they no longer do
   but it doesn't hurt to keep them here.
*/
void MC_(do_detect_memory_leaks) (
   ThreadId tid, LeakCheckMode mode,
   Bool (*is_within_valid_secondary) ( Addr ),
   Bool (*is_valid_aligned_word)     ( Addr )
)
{
   Int i;

   tl_assert(mode != LC_Off);

   lc_shadows = find_active_shadows(&lc_n_shadows);

   /* Sort the array. */
   VG_(ssort)((void*)lc_shadows, lc_n_shadows, sizeof(VgHashNode*), lc_compar);

   /* Sanity check; assert that the blocks are now in order */
   for (i = 0; i < lc_n_shadows-1; i++) {
      tl_assert( lc_shadows[i]->data <= lc_shadows[i+1]->data );
   }

   /* Sanity check -- make sure they don't overlap */
   for (i = 0; i < lc_n_shadows-1; i++) {
      tl_assert( lc_shadows[i]->data + lc_shadows[i]->szB
                 <= lc_shadows[i+1]->data );
   }

   if (lc_n_shadows == 0) {
      tl_assert(lc_shadows == NULL);
      if (VG_(clo_verbosity) >= 1 && !VG_(clo_xml)) {
         VG_(message)(Vg_UserMsg,
                      "All heap blocks were freed -- no leaks are possible.");
      }
      return;
   }

   if (VG_(clo_verbosity) > 0 && !VG_(clo_xml))
      VG_(message)(Vg_UserMsg,
                   "searching for pointers to %,d not-freed blocks.",
                   lc_n_shadows );

   lc_min_mallocd_addr = lc_shadows[0]->data;
   lc_max_mallocd_addr = lc_shadows[lc_n_shadows-1]->data
                         + lc_shadows[lc_n_shadows-1]->szB;

   lc_markstack = VG_(malloc)( lc_n_shadows * sizeof(*lc_markstack) );
   for (i = 0; i < lc_n_shadows; i++) {
      lc_markstack[i].next = -1;
      lc_markstack[i].state = Unreached;
      lc_markstack[i].indirect = 0;
   }
   lc_markstack_top = -1;

   lc_is_within_valid_secondary = is_within_valid_secondary;
   lc_is_valid_aligned_word     = is_valid_aligned_word;

   lc_scanned = 0;

   /* Push roots onto the mark stack.  Roots are:
      - the integer registers of all threads
      - all mappings belonging to the client, including stacks
      - .. but excluding any client heap segments.
      Client heap segments are excluded because we wish to differentiate
      client heap blocks which are referenced only from inside the heap
      from those referenced from outside.  This facilitates the indirect
      vs direct loss categorisation, which [if the users ever manage to
      understand it] is really useful for detecting lost cycles.
   */
   { Addr*     seg_starts;
     Int       n_seg_starts;
     seg_starts = get_seg_starts( &n_seg_starts );
     tl_assert(seg_starts && n_seg_starts > 0);
     /* VG_(am_show_nsegments)( 0,"leakcheck"); */
     for (i = 0; i < n_seg_starts; i++) {
        NSegment const* seg = VG_(am_find_nsegment)( seg_starts[i] );
        tl_assert(seg);
        if (seg->kind != SkFileC && seg->kind != SkAnonC)
           continue;
        if (!(seg->hasR && seg->hasW))
           continue;
        if (seg->isCH)
           continue;

        /* Don't poke around in device segments as this may cause
           hangs.  Exclude /dev/zero just in case someone allocated
           memory by explicitly mapping /dev/zero. */
        if (seg->kind == SkFileC
            && (VKI_S_ISCHR(seg->mode) || VKI_S_ISBLK(seg->mode))) {
           HChar* dev_name = VG_(am_get_filename)( (NSegment*)seg );
           if (dev_name && 0 == VG_(strcmp)(dev_name, "/dev/zero")) {
              /* don't skip /dev/zero */
           } else {
              /* skip this device mapping */
              continue;
           }
        }

        if (0)
           VG_(printf)("ACCEPT %2d  %p %p\n", i, seg->start, seg->end);
        lc_scan_memory(seg->start, seg->end+1 - seg->start);
     }
     /* get_seg_starts' result is malloc'd and must be freed by the
        caller. */
     VG_(free)(seg_starts);
   }

   /* Push registers onto mark stack */
   VG_(apply_to_GP_regs)(lc_markstack_push);

   /* Keep walking the heap until everything is found */
   lc_do_leakcheck(-1);

   if (VG_(clo_verbosity) > 0 && !VG_(clo_xml))
      VG_(message)(Vg_UserMsg, "checked %,lu bytes.", lc_scanned);

   blocks_leaked     = MC_(bytes_leaked)     = 0;
   blocks_indirect   = MC_(bytes_indirect)   = 0;
   blocks_dubious    = MC_(bytes_dubious)    = 0;
   blocks_reachable  = MC_(bytes_reachable)  = 0;
   blocks_suppressed = MC_(bytes_suppressed) = 0;

   if (mode == LC_Full)
      full_report(tid);
   else
      make_summary();

   if (VG_(clo_verbosity) > 0 && !VG_(clo_xml)) {
      VG_(message)(Vg_UserMsg, "");
      VG_(message)(Vg_UserMsg, "LEAK SUMMARY:");
      VG_(message)(Vg_UserMsg, "   definitely lost: %,lu bytes in %,lu blocks.",
                               MC_(bytes_leaked), blocks_leaked );
      if (blocks_indirect > 0)
         VG_(message)(Vg_UserMsg, "   indirectly lost: %,lu bytes in %,lu blocks.",
                                  MC_(bytes_indirect), blocks_indirect );
      VG_(message)(Vg_UserMsg, "     possibly lost: %,lu bytes in %,lu blocks.",
                               MC_(bytes_dubious), blocks_dubious );
      VG_(message)(Vg_UserMsg, "   still reachable: %,lu bytes in %,lu blocks.",
                               MC_(bytes_reachable), blocks_reachable );
      VG_(message)(Vg_UserMsg, "        suppressed: %,lu bytes in %,lu blocks.",
                               MC_(bytes_suppressed), blocks_suppressed );
      if (mode == LC_Summary
          && (blocks_leaked + blocks_indirect
              + blocks_dubious + blocks_reachable) > 0) {
         VG_(message)(Vg_UserMsg,
                      "Rerun with --leak-check=full to see details of leaked memory.");
      }
      if (blocks_reachable > 0 && !MC_(clo_show_reachable) && mode == LC_Full) {
         VG_(message)(Vg_UserMsg,
            "Reachable blocks (those to which a pointer was found) are not shown.");
         VG_(message)(Vg_UserMsg,
            "To see them, rerun with: --leak-check=full --show-reachable=yes");
      }
   }

   VG_(free) ( lc_shadows );
   VG_(free) ( lc_markstack );
}

/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/