
/*--------------------------------------------------------------------*/
/*--- The leak checker.                             mc_leakcheck.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors.

   Copyright (C) 2000-2008 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "pub_tool_basics.h"
#include "pub_tool_vki.h"
#include "pub_tool_aspacemgr.h"
#include "pub_tool_execontext.h"
#include "pub_tool_hashtable.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_libcsignal.h"
#include "pub_tool_machine.h"
#include "pub_tool_mallocfree.h"
#include "pub_tool_options.h"
#include "pub_tool_signals.h"
#include "pub_tool_tooliface.h"     // Needed for mc_include.h

#include "mc_include.h"

#include <setjmp.h>                 // For jmp_buf


/* Define to debug the memory-leak-detector. */
#define VG_DEBUG_LEAKCHECK 0
#define VG_DEBUG_CLIQUE    0

/*------------------------------------------------------------*/
/*--- Low-level address-space scanning, for the leak       ---*/
/*--- detector.                                            ---*/
/*------------------------------------------------------------*/

static
jmp_buf memscan_jmpbuf;


static
void scan_all_valid_memory_catcher ( Int sigNo, Addr addr )
{
   if (0)
      VG_(printf)("OUCH! sig=%d addr=%#lx\n", sigNo, addr);
   if (sigNo == VKI_SIGSEGV || sigNo == VKI_SIGBUS)
      __builtin_longjmp(memscan_jmpbuf, 1);
}
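
/* Note: this catcher pairs with the __builtin_setjmp in
   lc_scan_memory_WRK below.  The scanner may still probe addresses
   that fault despite its up-front validity checks; on SIGSEGV or
   SIGBUS we longjmp back into the scan loop, which then skips the
   offending page. */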


/* TODO: GIVE THIS A PROPER HOME
   TODO: MERGE THIS WITH DUPLICATE IN m_main.c and coredump-elf.c.
   Extract from aspacem a vector of the current segment start
   addresses.  The vector is dynamically allocated and should be freed
   by the caller when done.  REQUIRES m_mallocfree to be running.
   Writes the number of addresses required into *n_acquired. */

static Addr* get_seg_starts ( /*OUT*/Int* n_acquired )
{
   Addr* starts;
   Int   n_starts, r = 0;

   n_starts = 1;
   while (True) {
      starts = VG_(malloc)( n_starts * sizeof(Addr) );
      if (starts == NULL)
         break;
      r = VG_(am_get_segment_starts)( starts, n_starts );
      if (r >= 0)
         break;
      VG_(free)(starts);
      n_starts *= 2;
   }

   if (starts == NULL) {
     *n_acquired = 0;
     return NULL;
   }

   *n_acquired = r;
   return starts;
}
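
/* Usage sketch (illustrative; cf. MC_(do_detect_memory_leaks) below):

      Int   n_seg_starts;
      Addr* seg_starts = get_seg_starts( &n_seg_starts );
      ... iterate over seg_starts[0 .. n_seg_starts-1] ...
      VG_(free)(seg_starts);

   The grow-and-retry loop above relies on VG_(am_get_segment_starts)
   returning a negative value when the supplied array is too small, in
   which case the array is doubled and the call retried. */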


/*------------------------------------------------------------*/
/*--- Detecting leaked (unreachable) malloc'd blocks.      ---*/
/*------------------------------------------------------------*/

/* An entry in the mark stack */
typedef
   struct {
      Int   next:30;    /* Index of next in mark stack */
      UInt  state:2;    /* Reachedness */
      SizeT indirect;   /* if Unreached, how much is unreachable from here */
   }
   MarkStack;
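
/* The mark "stack" is an array parallel to lc_shadows: entry i
   describes block i.  Stacking is intrusive: the 'next' fields chain
   block indices together, with lc_markstack_top as the head and -1 as
   the terminator.  For example, pushing blocks 3 and then 7 onto an
   empty stack leaves lc_markstack_top == 7, lc_markstack[7].next == 3
   and lc_markstack[3].next == -1. */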

/* Find the i such that ptr points at or inside the block described by
   shadows[i].  Return -1 if none found.  This assumes that shadows[]
   has been sorted on the ->data field. */

#if VG_DEBUG_LEAKCHECK
/* Used to sanity-check the fast binary-search mechanism. */
static
Int find_shadow_for_OLD ( Addr       ptr,
                          MC_Chunk** shadows,
                          Int        n_shadows )

{
   Int  i;
   Addr a_lo, a_hi;
   PROF_EVENT(70, "find_shadow_for_OLD");
   for (i = 0; i < n_shadows; i++) {
      PROF_EVENT(71, "find_shadow_for_OLD(loop)");
      a_lo = shadows[i]->data;
      a_hi = ((Addr)shadows[i]->data) + shadows[i]->szB;
      if (a_lo <= ptr && ptr < a_hi)
         return i;
   }
   return -1;
}
#endif


static
Int find_shadow_for ( Addr       ptr,
                      MC_Chunk** shadows,
                      Int        n_shadows )
{
   Addr a_mid_lo, a_mid_hi;
   Int lo, mid, hi, retVal;
   /* VG_(printf)("find shadow for %p = ", ptr); */
   retVal = -1;
   lo = 0;
   hi = n_shadows-1;
   while (True) {
      /* invariant: current unsearched space is from lo to hi, inclusive. */
      if (lo > hi) break; /* not found */

      mid      = (lo + hi) / 2;
      a_mid_lo = shadows[mid]->data;
      a_mid_hi = shadows[mid]->data + shadows[mid]->szB;
      /* Extent of block 'mid' is [a_mid_lo .. a_mid_hi).
         Special-case zero-sized blocks - treat them as if they had
         size 1.  Not doing so causes them to not cover any address
         range at all and so will never be identified as the target of
         any pointer, which causes them to be incorrectly reported as
         definitely leaked. */
      if (shadows[mid]->szB == 0)
         a_mid_hi++;

      if (ptr < a_mid_lo) {
         hi = mid-1;
         continue;
      }
      if (ptr >= a_mid_hi) {
         lo = mid+1;
         continue;
      }
      tl_assert(ptr >= a_mid_lo && ptr < a_mid_hi);
      retVal = mid;
      break;
   }

#  if VG_DEBUG_LEAKCHECK
   tl_assert(retVal == find_shadow_for_OLD ( ptr, shadows, n_shadows ));
#  endif
   /* VG_(printf)("%d\n", retVal); */
   return retVal;
}
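
/* Illustrative example: given two sorted blocks
      shadows[0]: data = 0x1000, szB = 0x10
      shadows[1]: data = 0x2000, szB = 0
   find_shadow_for(0x1008, ...) returns 0 (an interior pointer),
   find_shadow_for(0x2000, ...) returns 1 (the zero-sized block is
   treated as if it had size 1), and find_shadow_for(0x1800, ...)
   returns -1. */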

/* Globals, for the following callback used by VG_(detect_memory_leaks). */
static MC_Chunk** lc_shadows;
static Int        lc_n_shadows;
static MarkStack* lc_markstack;
static Int        lc_markstack_top;
static Addr       lc_min_mallocd_addr;
static Addr       lc_max_mallocd_addr;
static SizeT      lc_scanned;

static Bool       (*lc_is_within_valid_secondary) (Addr addr);
static Bool       (*lc_is_valid_aligned_word)     (Addr addr);


SizeT MC_(bytes_leaked)     = 0;
SizeT MC_(bytes_indirect)   = 0;
SizeT MC_(bytes_dubious)    = 0;
SizeT MC_(bytes_reachable)  = 0;
SizeT MC_(bytes_suppressed) = 0;

static Int lc_compar(void* n1, void* n2)
{
   MC_Chunk* mc1 = *(MC_Chunk**)n1;
   MC_Chunk* mc2 = *(MC_Chunk**)n2;
   if (mc1->data < mc2->data) return -1;
   if (mc1->data > mc2->data) return  1;
   return 0;
}
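
/* lc_compar orders blocks by start address only, as required by
   find_shadow_for.  Blocks with identical start addresses compare
   equal, which is why the overlap sanity check in
   MC_(do_detect_memory_leaks) must tolerate exact duplicates. */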

/* If ptr is pointing to a heap-allocated block which hasn't been seen
   before, push it onto the mark stack.  Clique is the index of the
   clique leader; -1 if none. */
static void lc_markstack_push_WRK(Addr ptr, Int clique)
{
   Int sh_no;

   /* quick filter */
   if (!VG_(am_is_valid_for_client)(ptr, 1, VKI_PROT_NONE))
      return;

   sh_no = find_shadow_for(ptr, lc_shadows, lc_n_shadows);

   if (VG_DEBUG_LEAKCHECK)
      VG_(printf)("ptr=%#lx -> block %d\n", ptr, sh_no);

   if (sh_no == -1)
      return;

   tl_assert(sh_no >= 0 && sh_no < lc_n_shadows);
   tl_assert(ptr >= lc_shadows[sh_no]->data);
   tl_assert(ptr < lc_shadows[sh_no]->data
                   + lc_shadows[sh_no]->szB
                   + (lc_shadows[sh_no]->szB==0  ? 1  : 0));

   if (lc_markstack[sh_no].state == Unreached) {
      if (0)
         VG_(printf)("pushing %#lx-%#lx\n", lc_shadows[sh_no]->data,
                     lc_shadows[sh_no]->data + lc_shadows[sh_no]->szB);

      tl_assert(lc_markstack[sh_no].next == -1);
      lc_markstack[sh_no].next = lc_markstack_top;
      lc_markstack_top = sh_no;
   }

   tl_assert(clique >= -1 && clique < lc_n_shadows);

   if (clique != -1) {
      if (0)
         VG_(printf)("mopup: %d: %#lx is %d\n",
                     sh_no, lc_shadows[sh_no]->data, lc_markstack[sh_no].state);

      /* An unmarked block - add it to the clique.  Add its size to
         the clique-leader's indirect size.  If the new block was
         itself a clique leader, it isn't any more, so add its
         indirect to the new clique leader.

         If this block *is* the clique leader, it means this is a
         cyclic structure, so none of this applies. */
      if (lc_markstack[sh_no].state == Unreached) {
         lc_markstack[sh_no].state = IndirectLeak;

         if (sh_no != clique) {
            if (VG_DEBUG_CLIQUE) {
               if (lc_markstack[sh_no].indirect)
                  VG_(printf)("  clique %d joining clique %d adding %lu+%lu bytes\n",
                              sh_no, clique,
                              lc_shadows[sh_no]->szB + 0UL,
                              lc_markstack[sh_no].indirect);
               else
                  VG_(printf)("  %d joining %d adding %lu\n",
                              sh_no, clique,
                              lc_shadows[sh_no]->szB + 0UL);
            }

            lc_markstack[clique].indirect += lc_shadows[sh_no]->szB;
            lc_markstack[clique].indirect += lc_markstack[sh_no].indirect;
            lc_markstack[sh_no].indirect = 0; /* shouldn't matter */
         }
      }
   } else if (ptr == lc_shadows[sh_no]->data) {
      lc_markstack[sh_no].state = Proper;
   } else {
      if (lc_markstack[sh_no].state == Unreached)
         lc_markstack[sh_no].state = Interior;
   }
}
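
/* Summary of the state transitions performed above.  A block is
   pushed onto the mark stack only on its first visit, i.e. while it
   is still Unreached.  Then:
     clique == -1:  a start-pointer sets the state to Proper (this can
                    also upgrade an earlier Interior); an interior
                    pointer moves Unreached to Interior;
     clique != -1:  Unreached moves to IndirectLeak, and the block's
                    size (plus any indirect size it accumulated as a
                    former clique leader) is credited to
                    lc_markstack[clique].indirect. */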

static void lc_markstack_push(Addr ptr)
{
   lc_markstack_push_WRK(ptr, -1);
}

/* Return the top of the mark stack, if any. */
static Int lc_markstack_pop(void)
{
   Int ret = lc_markstack_top;

   if (ret != -1) {
      lc_markstack_top = lc_markstack[ret].next;
      lc_markstack[ret].next = -1;
   }

   return ret;
}


/* Scan a block of memory between [start, start+len).  This range may
   be bogus, inaccessible, or otherwise strange; we deal with it.

   If clique != -1, it means we're gathering leaked memory into
   cliques, and clique is the index of the current clique leader. */
static void lc_scan_memory_WRK(Addr start, SizeT len, Int clique)
{
   Addr ptr = VG_ROUNDUP(start, sizeof(Addr));
   Addr end = VG_ROUNDDN(start+len, sizeof(Addr));
   vki_sigset_t sigmask;

   if (VG_DEBUG_LEAKCHECK)
      VG_(printf)("scan %#lx-%#lx\n", start, start+len);
   VG_(sigprocmask)(VKI_SIG_SETMASK, NULL, &sigmask);
   VG_(set_fault_catcher)(scan_all_valid_memory_catcher);

   //   lc_scanned += end-ptr;

   if (!VG_(am_is_valid_for_client)(ptr, sizeof(Addr), VKI_PROT_READ))
      ptr = VG_PGROUNDUP(ptr+1);        /* first page bad */

   while (ptr < end) {
      Addr addr;

      /* Skip invalid chunks */
      if (!(*lc_is_within_valid_secondary)(ptr)) {
         ptr = VG_ROUNDUP(ptr+1, SM_SIZE);
         continue;
      }

      /* Look to see if this page seems reasonable */
      if ((ptr % VKI_PAGE_SIZE) == 0) {
         if (!VG_(am_is_valid_for_client)(ptr, sizeof(Addr), VKI_PROT_READ))
            ptr += VKI_PAGE_SIZE; /* bad page - skip it */
      }

      if (__builtin_setjmp(memscan_jmpbuf) == 0) {
         if ((*lc_is_valid_aligned_word)(ptr)) {
            lc_scanned += sizeof(Addr);
            addr = *(Addr *)ptr;
            lc_markstack_push_WRK(addr, clique);
         } else if (0 && VG_DEBUG_LEAKCHECK)
            VG_(printf)("%#lx not valid\n", ptr);
         ptr += sizeof(Addr);
      } else {
         /* We need to restore the signal mask, because we were
            longjmped out of a signal handler. */
         VG_(sigprocmask)(VKI_SIG_SETMASK, &sigmask, NULL);

         ptr = VG_PGROUNDUP(ptr+1);     /* bad page - skip it */
      }
   }

   VG_(sigprocmask)(VKI_SIG_SETMASK, &sigmask, NULL);
   VG_(set_fault_catcher)(NULL);
}
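
/* Note the three skip granularities in the loop above: a whole
   secondary map (SM_SIZE) when (*lc_is_within_valid_secondary) says
   no address in it is valid, a whole page when the page is unreadable
   (detected up front or via a caught fault), and otherwise one word
   at a time.  Each aligned word accepted by (*lc_is_valid_aligned_word)
   is loaded and treated as a potential pointer. */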


static void lc_scan_memory(Addr start, SizeT len)
{
   lc_scan_memory_WRK(start, len, -1);
}

/* Process the mark stack until empty.  If clique != -1, then we're
   actually gathering leaked blocks, so they should be marked
   IndirectLeak. */
static void lc_do_leakcheck(Int clique)
{
   Int top;

   while((top = lc_markstack_pop()) != -1) {
      tl_assert(top >= 0 && top < lc_n_shadows);
      tl_assert(lc_markstack[top].state != Unreached);

      lc_scan_memory_WRK(lc_shadows[top]->data, lc_shadows[top]->szB, clique);
   }
}
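
/* lc_do_leakcheck is used in two modes: with clique == -1 for the
   main mark phase (called once from MC_(do_detect_memory_leaks) after
   the roots have been pushed), and with clique == i from full_report,
   to sweep everything reachable from Unreached block i into its
   clique. */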

static SizeT blocks_leaked;
static SizeT blocks_indirect;
static SizeT blocks_dubious;
static SizeT blocks_reachable;
static SizeT blocks_suppressed;

static void full_report(ThreadId tid)
{
   Int i;
   Int    n_lossrecords;
   LossRecord* errlist;
   LossRecord* p;
   Bool   is_suppressed;

   /* Go through and group lost structures into cliques.  For each
      Unreached block, push it onto the mark stack, and find all the
      blocks linked to it.  These are marked IndirectLeak, and their
      size is added to the clique leader's indirect size.  If one of
      the found blocks was itself a clique leader (from a previous
      pass), then the cliques are merged. */
   for (i = 0; i < lc_n_shadows; i++) {
      if (VG_DEBUG_CLIQUE)
         VG_(printf)("cliques: %d at %#lx -> Loss state %d\n",
                     i, lc_shadows[i]->data, lc_markstack[i].state);
      if (lc_markstack[i].state != Unreached)
         continue;

      tl_assert(lc_markstack_top == -1);

      if (VG_DEBUG_CLIQUE)
         VG_(printf)("%d: gathering clique %#lx\n", i, lc_shadows[i]->data);

      lc_markstack_push_WRK(lc_shadows[i]->data, i);

      lc_do_leakcheck(i);

      tl_assert(lc_markstack_top == -1);
      tl_assert(lc_markstack[i].state == IndirectLeak
                /* jrs 20051218: Ashley Pittman supplied a
                   custom-allocator test program which causes the ==
                   IndirectLeak condition to fail - it causes .state
                   to be Unreached.  Since I have no idea how this
                   clique stuff works and no time to figure it out,
                   just allow that condition too.  This could well be
                   a completely bogus fix.  It doesn't seem unsafe
                   given that in any case the .state field is
                   immediately overwritten by the next statement. */
                || lc_markstack[i].state == Unreached);

      lc_markstack[i].state = Unreached; /* Return to unreached state,
                                            to indicate it's a clique
                                            leader */
   }
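
   /* Worked example of the clique pass: suppose blocks A (16 bytes),
      B (32) and C (8) are all Unreached, with A pointing to B and B
      pointing to C.  Gathering the clique for A marks B and C
      IndirectLeak, accumulates 40 bytes into A's .indirect, and then
      resets A to Unreached to mark it as the leader.  A is later
      reported as "definitely lost", with B and C contributing to the
      "indirectly lost" totals. */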

   /* Common up the lost blocks so we can print sensible error messages. */
   n_lossrecords = 0;
   errlist       = NULL;
   for (i = 0; i < lc_n_shadows; i++) {
      ExeContext* where = lc_shadows[i]->where;

      for (p = errlist; p != NULL; p = p->next) {
         if (p->loss_mode == lc_markstack[i].state
             && VG_(eq_ExeContext) ( MC_(clo_leak_resolution),
                                     p->allocated_at,
                                     where) ) {
            break;
         }
      }
      if (p != NULL) {
         p->num_blocks  ++;
         p->total_bytes += lc_shadows[i]->szB;
         p->indirect_bytes += lc_markstack[i].indirect;
      } else {
         n_lossrecords ++;
         p = VG_(malloc)(sizeof(LossRecord));
         p->loss_mode    = lc_markstack[i].state;
         p->allocated_at = where;
         p->total_bytes  = lc_shadows[i]->szB;
         p->indirect_bytes = lc_markstack[i].indirect;
         p->num_blocks   = 1;
         p->next         = errlist;
         errlist         = p;
      }
   }
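
   /* Blocks are grouped into one LossRecord per (loss state,
      allocation context) pair, where contexts are compared to the
      depth given by --leak-resolution (MC_(clo_leak_resolution)).
      A coarser resolution therefore merges more blocks into fewer,
      larger records. */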

   /* Print out the commoned-up blocks and collect summary stats. */
   for (i = 0; i < n_lossrecords; i++) {
      Bool        print_record;
      LossRecord* p_min = NULL;
      SizeT       n_min = ~(0x0L);
      for (p = errlist; p != NULL; p = p->next) {
         if (p->num_blocks > 0
             && p->total_bytes + p->indirect_bytes < n_min) {
            n_min = p->total_bytes + p->indirect_bytes;
            p_min = p;
         }
      }
      tl_assert(p_min != NULL);

      /* Ok to have tst==NULL;  it's only used if --gdb-attach=yes, and
         we disallow that when --leak-check=yes.

         Prints the error if not suppressed, unless it's reachable (Proper
         or IndirectLeak) and --show-reachable=no */

      print_record = ( MC_(clo_show_reachable) ||
                       Unreached == p_min->loss_mode ||
                       Interior == p_min->loss_mode );

      // Nb: because VG_(unique_error) does all the error processing
      // immediately, and doesn't save the error, leakExtra can be
      // stack-allocated.
      is_suppressed =
         MC_(record_leak_error) ( tid, i+1, n_lossrecords, p_min,
                                  print_record );

      if (is_suppressed) {
         blocks_suppressed     += p_min->num_blocks;
         MC_(bytes_suppressed) += p_min->total_bytes;

      } else if (Unreached == p_min->loss_mode) {
         blocks_leaked       += p_min->num_blocks;
         MC_(bytes_leaked)   += p_min->total_bytes;

      } else if (IndirectLeak == p_min->loss_mode) {
         blocks_indirect     += p_min->num_blocks;
         MC_(bytes_indirect) += p_min->total_bytes;

      } else if (Interior == p_min->loss_mode) {
         blocks_dubious     += p_min->num_blocks;
         MC_(bytes_dubious) += p_min->total_bytes;

      } else if (Proper == p_min->loss_mode) {
         blocks_reachable     += p_min->num_blocks;
         MC_(bytes_reachable) += p_min->total_bytes;

      } else {
         VG_(tool_panic)("generic_detect_memory_leaks: unknown loss mode");
      }
      p_min->num_blocks = 0;
   }
}
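
/* Records are emitted smallest-first: each pass of the outer loop in
   full_report selects the live record with the smallest total_bytes +
   indirect_bytes and then retires it by zeroing num_blocks.  This is
   O(n_lossrecords^2), which is tolerable since commoned-up records
   are typically far fewer than blocks. */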

/* Compute a quick summary of the leak check. */
static void make_summary(void)
{
   Int i;

   for(i = 0; i < lc_n_shadows; i++) {
      SizeT size = lc_shadows[i]->szB;

      switch(lc_markstack[i].state) {
      case Unreached:
         blocks_leaked++;
         MC_(bytes_leaked) += size;
         break;

      case Proper:
         blocks_reachable++;
         MC_(bytes_reachable) += size;
         break;

      case Interior:
         blocks_dubious++;
         MC_(bytes_dubious) += size;
         break;

      case IndirectLeak:        /* shouldn't happen */
         blocks_indirect++;
         MC_(bytes_indirect) += size;
         break;
      }
   }
}

static MC_Chunk**
find_active_shadows(UInt* n_shadows)
{
   /* Our goal is to construct a set of shadows that includes every
    * mempool chunk, and every malloc region that *doesn't* contain a
    * mempool chunk. We do this in several phases.
    *
    * First we collect all the malloc chunks into an array and sort it.
    * We do this because we want to query the chunks by interior
    * pointers, requiring binary search.
    *
    * Second we build an array containing a Bool for each malloc chunk,
    * indicating whether it contains any mempool chunks.
    *
    * Third we loop over the mempool tables. For each chunk in each
    * pool, we set the entry in the Bool array corresponding to the
    * malloc chunk containing the mempool chunk.
    *
    * Finally we copy the mempool chunks and the non-marked malloc
    * chunks into a combined array of shadows, free our temporaries,
    * and return the combined array.
    */

   MC_Mempool *mp;
   MC_Chunk **mallocs, **shadows, *mc;
   UInt n_mallocs, m, s;
   Bool *malloc_chunk_holds_a_pool_chunk;

   mallocs = (MC_Chunk**) VG_(HT_to_array)( MC_(malloc_list), &n_mallocs );

   if (n_mallocs == 0) {
      tl_assert(mallocs == NULL);
      *n_shadows = 0;
      return NULL;
   }

   VG_(ssort)((void*)mallocs, n_mallocs,
              sizeof(VgHashNode*), lc_compar);

   malloc_chunk_holds_a_pool_chunk = VG_(calloc)( n_mallocs, sizeof(Bool) );

   *n_shadows = n_mallocs;

   VG_(HT_ResetIter)(MC_(mempool_list));
   while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
      VG_(HT_ResetIter)(mp->chunks);
      while ( (mc = VG_(HT_Next)(mp->chunks)) ) {

         /* We'll need a shadow for this chunk. */
         ++(*n_shadows);

         /* Possibly invalidate the malloc holding the beginning of this chunk. */
         m = find_shadow_for(mc->data, mallocs, n_mallocs);
         if (m != -1 && malloc_chunk_holds_a_pool_chunk[m] == False) {
            tl_assert(*n_shadows > 0);
            --(*n_shadows);
            malloc_chunk_holds_a_pool_chunk[m] = True;
         }

         /* Possibly invalidate the malloc holding the end of this chunk. */
         if (mc->szB > 1) {
            m = find_shadow_for(mc->data + (mc->szB - 1), mallocs, n_mallocs);
            if (m != -1 && malloc_chunk_holds_a_pool_chunk[m] == False) {
               tl_assert(*n_shadows > 0);
               --(*n_shadows);
               malloc_chunk_holds_a_pool_chunk[m] = True;
            }
         }
      }
   }

   tl_assert(*n_shadows > 0);
   shadows = VG_(malloc)(sizeof(VgHashNode*) * (*n_shadows));
   s = 0;

   /* Copy the mempool chunks into the final array. */
   VG_(HT_ResetIter)(MC_(mempool_list));
   while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
      VG_(HT_ResetIter)(mp->chunks);
      while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
         tl_assert(s < *n_shadows);
         shadows[s++] = mc;
      }
   }

   /* Copy the malloc chunks into the final array. */
   for (m = 0; m < n_mallocs; ++m) {
      if (!malloc_chunk_holds_a_pool_chunk[m]) {
         tl_assert(s < *n_shadows);
         shadows[s++] = mallocs[m];
      }
   }

   tl_assert(s == *n_shadows);
   VG_(free)(mallocs);
   VG_(free)(malloc_chunk_holds_a_pool_chunk);

   return shadows;
}
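
/* For example, a single malloc'd arena carved into three mempool
   chunks (registered via the VALGRIND_MEMPOOL_* client requests)
   yields three shadows, one per pool chunk; the enclosing malloc
   block is dropped so that its bytes are not double-counted by the
   leak checker. */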


/* Top level entry point to leak detector.  Call here, passing in
   suitable address-validating functions (see comments on
   lc_scan_memory_WRK above).  These functions used to encapsulate the
   differences between Memcheck and Addrcheck;  they no longer do but it
   doesn't hurt to keep them here.
*/
void MC_(do_detect_memory_leaks) (
   ThreadId tid, LeakCheckMode mode,
   Bool (*is_within_valid_secondary) ( Addr ),
   Bool (*is_valid_aligned_word)     ( Addr )
)
{
   Int i;

   tl_assert(mode != LC_Off);

   lc_shadows = find_active_shadows(&lc_n_shadows);

   /* Sort the array. */
   VG_(ssort)((void*)lc_shadows, lc_n_shadows, sizeof(VgHashNode*), lc_compar);

   /* Sanity check; assert that the blocks are now in order */
   for (i = 0; i < lc_n_shadows-1; i++) {
      tl_assert( lc_shadows[i]->data <= lc_shadows[i+1]->data);
   }

   /* Sanity check -- make sure they don't overlap.  But do allow
      exact duplicates.  If this assertion fails, it may mean that the
      application has done something stupid with
      VALGRIND_MALLOCLIKE_BLOCK client requests, specifically, has
      made overlapping requests (which are nonsensical).  Another way
      to screw up is to use VALGRIND_MALLOCLIKE_BLOCK for stack
      locations; again nonsensical. */
   for (i = 0; i < lc_n_shadows-1; i++) {
      tl_assert( /* normal case - no overlap */
                 (lc_shadows[i]->data + lc_shadows[i]->szB
                  <= lc_shadows[i+1]->data )
                 ||
                 /* degenerate case: exact duplicates */
                 (lc_shadows[i]->data == lc_shadows[i+1]->data
                  && lc_shadows[i]->szB == lc_shadows[i+1]->szB)
               );
   }

   if (lc_n_shadows == 0) {
      tl_assert(lc_shadows == NULL);
      if (VG_(clo_verbosity) >= 1 && !VG_(clo_xml)) {
         VG_(message)(Vg_UserMsg,
                      "All heap blocks were freed -- no leaks are possible.");
      }
      return;
   }

   if (VG_(clo_verbosity) > 0 && !VG_(clo_xml))
      VG_(message)(Vg_UserMsg,
                   "searching for pointers to %'d not-freed blocks.",
                   lc_n_shadows );

   lc_min_mallocd_addr = lc_shadows[0]->data;
   lc_max_mallocd_addr = lc_shadows[lc_n_shadows-1]->data
                         + lc_shadows[lc_n_shadows-1]->szB;

   lc_markstack = VG_(malloc)( lc_n_shadows * sizeof(*lc_markstack) );
   for (i = 0; i < lc_n_shadows; i++) {
      lc_markstack[i].next = -1;
      lc_markstack[i].state = Unreached;
      lc_markstack[i].indirect = 0;
   }
   lc_markstack_top = -1;

   lc_is_within_valid_secondary = is_within_valid_secondary;
   lc_is_valid_aligned_word     = is_valid_aligned_word;

   lc_scanned = 0;

   /* Push roots onto the mark stack.  Roots are:
      - the integer registers of all threads
      - all mappings belonging to the client, including stacks
      - .. but excluding any client heap segments.
      Client heap segments are excluded because we wish to differentiate
      client heap blocks which are referenced only from inside the heap
      from those outside.  This facilitates the indirect vs direct loss
      categorisation, which [if the users ever manage to understand it]
      is really useful for detecting lost cycles.
   */
   { Addr*     seg_starts;
     Int       n_seg_starts;
     seg_starts = get_seg_starts( &n_seg_starts );
     tl_assert(seg_starts && n_seg_starts > 0);
     /* VG_(am_show_nsegments)( 0,"leakcheck"); */
     for (i = 0; i < n_seg_starts; i++) {
        NSegment const* seg = VG_(am_find_nsegment)( seg_starts[i] );
        tl_assert(seg);
        if (seg->kind != SkFileC && seg->kind != SkAnonC)
           continue;
        if (!(seg->hasR && seg->hasW))
           continue;
        if (seg->isCH)
           continue;

        /* Don't poke around in device segments as this may cause
           hangs.  Exclude /dev/zero just in case someone allocated
           memory by explicitly mapping /dev/zero. */
        if (seg->kind == SkFileC
            && (VKI_S_ISCHR(seg->mode) || VKI_S_ISBLK(seg->mode))) {
           HChar* dev_name = VG_(am_get_filename)( (NSegment*)seg );
           if (dev_name && 0 == VG_(strcmp)(dev_name, "/dev/zero")) {
              /* don't skip /dev/zero */
           } else {
              /* skip this device mapping */
              continue;
           }
        }

        if (0)
           VG_(printf)("ACCEPT %2d  %#lx %#lx\n", i, seg->start, seg->end);
        lc_scan_memory(seg->start, seg->end+1 - seg->start);
     }
     VG_(free)(seg_starts);
   }

   /* Push registers onto mark stack */
   VG_(apply_to_GP_regs)(lc_markstack_push);

   /* Keep walking the heap until everything is found */
   lc_do_leakcheck(-1);

   if (VG_(clo_verbosity) > 0 && !VG_(clo_xml))
      VG_(message)(Vg_UserMsg, "checked %'lu bytes.", lc_scanned);

   blocks_leaked     = MC_(bytes_leaked)     = 0;
   blocks_indirect   = MC_(bytes_indirect)   = 0;
   blocks_dubious    = MC_(bytes_dubious)    = 0;
   blocks_reachable  = MC_(bytes_reachable)  = 0;
   blocks_suppressed = MC_(bytes_suppressed) = 0;

   if (mode == LC_Full)
      full_report(tid);
   else
      make_summary();

   if (VG_(clo_verbosity) > 0 && !VG_(clo_xml)) {
      VG_(message)(Vg_UserMsg, "");
      VG_(message)(Vg_UserMsg, "LEAK SUMMARY:");
      VG_(message)(Vg_UserMsg, "   definitely lost: %'lu bytes in %'lu blocks.",
                               MC_(bytes_leaked), blocks_leaked );
      if (blocks_indirect > 0)
         VG_(message)(Vg_UserMsg, "   indirectly lost: %'lu bytes in %'lu blocks.",
                      MC_(bytes_indirect), blocks_indirect );
      VG_(message)(Vg_UserMsg, "     possibly lost: %'lu bytes in %'lu blocks.",
                               MC_(bytes_dubious), blocks_dubious );
      VG_(message)(Vg_UserMsg, "   still reachable: %'lu bytes in %'lu blocks.",
                               MC_(bytes_reachable), blocks_reachable );
      VG_(message)(Vg_UserMsg, "        suppressed: %'lu bytes in %'lu blocks.",
                               MC_(bytes_suppressed), blocks_suppressed );
      if (mode == LC_Summary
          && (blocks_leaked + blocks_indirect
              + blocks_dubious + blocks_reachable) > 0) {
         VG_(message)(Vg_UserMsg,
                      "Rerun with --leak-check=full to see details of leaked memory.");
      }
      if (blocks_reachable > 0 && !MC_(clo_show_reachable) && mode == LC_Full) {
         VG_(message)(Vg_UserMsg,
           "Reachable blocks (those to which a pointer was found) are not shown.");
         VG_(message)(Vg_UserMsg,
            "To see them, rerun with: --leak-check=full --show-reachable=yes");
      }
   }

   VG_(free) ( lc_shadows );
   VG_(free) ( lc_markstack );
}

/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/