
/*--------------------------------------------------------------------*/
/*--- The leak checker.                             mc_leakcheck.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors.

   Copyright (C) 2000-2008 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "pub_tool_basics.h"
#include "pub_tool_vki.h"
#include "pub_tool_aspacemgr.h"
#include "pub_tool_execontext.h"
#include "pub_tool_hashtable.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_libcsignal.h"
#include "pub_tool_machine.h"
#include "pub_tool_mallocfree.h"
#include "pub_tool_options.h"
#include "pub_tool_signals.h"
#include "pub_tool_tooliface.h"     // Needed for mc_include.h

#include "mc_include.h"

#include <setjmp.h>                 // For jmp_buf


/* Define to debug the memory-leak-detector. */
#define VG_DEBUG_LEAKCHECK 0
#define VG_DEBUG_CLIQUE    0

/*------------------------------------------------------------*/
/*--- Low-level address-space scanning, for the leak       ---*/
/*--- detector.                                            ---*/
/*------------------------------------------------------------*/

static
jmp_buf memscan_jmpbuf;


static
void scan_all_valid_memory_catcher ( Int sigNo, Addr addr )
{
   if (0)
      VG_(printf)("OUCH! sig=%d addr=%#lx\n", sigNo, addr);
   if (sigNo == VKI_SIGSEGV || sigNo == VKI_SIGBUS)
      __builtin_longjmp(memscan_jmpbuf, 1);
}
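
/* Illustrative sketch, not part of the tool: the catcher above pairs
   with __builtin_setjmp in lc_scan_memory_WRK below.  Stripped of the
   signal-mask handling the real scanner does, the probe-a-word
   pattern looks like this (can_read_word is a hypothetical name): */
#if 0
static Bool can_read_word ( Addr a )
{
   Bool ok;
   VG_(set_fault_catcher)(scan_all_valid_memory_catcher);
   if (__builtin_setjmp(memscan_jmpbuf) == 0) {
      volatile Addr v = *(Addr*)a;  /* may fault; catcher longjmps back */
      (void)v;
      ok = True;
   } else {
      ok = False;                   /* we arrived here via longjmp */
   }
   VG_(set_fault_catcher)(NULL);
   return ok;
}
#endif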


/* TODO: GIVE THIS A PROPER HOME
   TODO: MERGE THIS WITH DUPLICATE IN m_main.c and coredump-elf.c.
   Extract from aspacem a vector of the current segment start
   addresses.  The vector is dynamically allocated and should be freed
   by the caller when done.  REQUIRES m_mallocfree to be running.
   Writes the number of addresses required into *n_acquired. */

static Addr* get_seg_starts ( /*OUT*/Int* n_acquired )
{
   Addr* starts;
   Int   n_starts, r = 0;

   n_starts = 1;
   while (True) {
      starts = VG_(malloc)( "mc.gss.1", n_starts * sizeof(Addr) );
      if (starts == NULL)
         break;
      r = VG_(am_get_segment_starts)( starts, n_starts );
      if (r >= 0)
         break;
      VG_(free)(starts);
      n_starts *= 2;
   }

   if (starts == NULL) {
      *n_acquired = 0;
      return NULL;
   }

   *n_acquired = r;
   return starts;
}
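
/* A minimal usage sketch, illustrative only.  The loop above doubles
   the vector until VG_(am_get_segment_starts) succeeds with a
   non-negative count, so a caller just consumes the result and frees
   it: */
#if 0
{
   Int   n_seg_starts, i;
   Addr* seg_starts = get_seg_starts( &n_seg_starts );
   if (seg_starts != NULL) {
      for (i = 0; i < n_seg_starts; i++)
         VG_(printf)("segment starts at %#lx\n", seg_starts[i]);
      VG_(free)(seg_starts);        /* the caller owns the vector */
   }
}
#endif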


/*------------------------------------------------------------*/
/*--- Detecting leaked (unreachable) malloc'd blocks.      ---*/
/*------------------------------------------------------------*/

/* An entry in the mark stack */
typedef
   struct {
      Int   next:30;     /* Index of next in mark stack */
      UInt  state:2;     /* Reachedness */
      SizeT indirect;    /* if Unreached, how much is unreachable from here */
   }
   MarkStack;
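
/* Note: there is one MarkStack entry per shadow block, indexed
   identically to lc_shadows[] below.  The 'next' field threads an
   intrusive LIFO list through the array: lc_markstack_top holds the
   index of the topmost entry, each entry's 'next' holds the index
   beneath it, and -1 terminates the list.  Push and pop are therefore
   O(1) index updates with no extra allocation. */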

/* Find the i such that ptr points at or inside the block described by
   shadows[i].  Return -1 if none found.  This assumes that shadows[]
   has been sorted on the ->data field. */

#if VG_DEBUG_LEAKCHECK
/* Used to sanity-check the fast binary-search mechanism. */
static
Int find_shadow_for_OLD ( Addr       ptr,
                          MC_Chunk** shadows,
                          Int        n_shadows )
{
   Int  i;
   Addr a_lo, a_hi;
   PROF_EVENT(70, "find_shadow_for_OLD");
   for (i = 0; i < n_shadows; i++) {
      PROF_EVENT(71, "find_shadow_for_OLD(loop)");
      a_lo = shadows[i]->data;
      a_hi = ((Addr)shadows[i]->data) + shadows[i]->szB;
      if (a_lo <= ptr && ptr < a_hi)
         return i;
   }
   return -1;
}
#endif


static
Int find_shadow_for ( Addr       ptr,
                      MC_Chunk** shadows,
                      Int        n_shadows )
{
   Addr a_mid_lo, a_mid_hi;
   Int lo, mid, hi, retVal;
   /* VG_(printf)("find shadow for %p = ", ptr); */
   retVal = -1;
   lo = 0;
   hi = n_shadows-1;
   while (True) {
      /* invariant: current unsearched space is from lo to hi, inclusive. */
      if (lo > hi) break; /* not found */

      mid      = (lo + hi) / 2;
      a_mid_lo = shadows[mid]->data;
      a_mid_hi = shadows[mid]->data + shadows[mid]->szB;
      /* Extent of block 'mid' is [a_mid_lo .. a_mid_hi).
         Special-case zero-sized blocks - treat them as if they had
         size 1.  Not doing so causes them to not cover any address
         range at all and so will never be identified as the target of
         any pointer, which causes them to be incorrectly reported as
         definitely leaked. */
      if (shadows[mid]->szB == 0)
         a_mid_hi++;

      if (ptr < a_mid_lo) {
         hi = mid-1;
         continue;
      }
      if (ptr >= a_mid_hi) {
         lo = mid+1;
         continue;
      }
      tl_assert(ptr >= a_mid_lo && ptr < a_mid_hi);
      retVal = mid;
      break;
   }

#  if VG_DEBUG_LEAKCHECK
   tl_assert(retVal == find_shadow_for_OLD ( ptr, shadows, n_shadows ));
#  endif
   /* VG_(printf)("%d\n", retVal); */
   return retVal;
}
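
/* Worked example, with illustrative addresses: given two sorted
   shadows covering [0x1000,0x1010) and [0x2000,0x2008), the search
   resolves both start and interior pointers and rejects everything
   else: */
#if 0
static void find_shadow_for_example ( MC_Chunk** shadows )
{
   tl_assert( 0 == find_shadow_for(0x1000, shadows, 2) ); /* block start    */
   tl_assert( 0 == find_shadow_for(0x100F, shadows, 2) ); /* interior ptr   */
   tl_assert(-1 == find_shadow_for(0x1010, shadows, 2) ); /* one past end   */
   tl_assert( 1 == find_shadow_for(0x2007, shadows, 2) ); /* last byte      */
   tl_assert(-1 == find_shadow_for(0x0FFF, shadows, 2) ); /* before block 0 */
}
#endif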

/* Globals, for the following callback used by VG_(detect_memory_leaks). */
static MC_Chunk** lc_shadows;
static Int        lc_n_shadows;
static MarkStack* lc_markstack;
static Int        lc_markstack_top;
static Addr       lc_min_mallocd_addr;
static Addr       lc_max_mallocd_addr;
static SizeT      lc_scanned;

static Bool       (*lc_is_within_valid_secondary) (Addr addr);
static Bool       (*lc_is_valid_aligned_word)     (Addr addr);


SizeT MC_(bytes_leaked)     = 0;
SizeT MC_(bytes_indirect)   = 0;
SizeT MC_(bytes_dubious)    = 0;
SizeT MC_(bytes_reachable)  = 0;
SizeT MC_(bytes_suppressed) = 0;

SizeT MC_(blocks_leaked)     = 0;
SizeT MC_(blocks_indirect)   = 0;
SizeT MC_(blocks_dubious)    = 0;
SizeT MC_(blocks_reachable)  = 0;
SizeT MC_(blocks_suppressed) = 0;

static Int lc_compar(void* n1, void* n2)
{
   MC_Chunk* mc1 = *(MC_Chunk**)n1;
   MC_Chunk* mc2 = *(MC_Chunk**)n2;
   if (mc1->data < mc2->data) return -1;
   if (mc1->data > mc2->data) return  1;
   return 0;
}

/* If ptr is pointing to a heap-allocated block which hasn't been seen
   before, push it onto the mark stack.  Clique is the index of the
   clique leader; -1 if none. */
static void lc_markstack_push_WRK(Addr ptr, Int clique)
{
   Int sh_no;

   /* quick filter */
   if (!VG_(am_is_valid_for_client)(ptr, 1, VKI_PROT_NONE))
      return;

   sh_no = find_shadow_for(ptr, lc_shadows, lc_n_shadows);

   if (VG_DEBUG_LEAKCHECK)
      VG_(printf)("ptr=%#lx -> block %d\n", ptr, sh_no);

   if (sh_no == -1)
      return;

   tl_assert(sh_no >= 0 && sh_no < lc_n_shadows);
   tl_assert(ptr >= lc_shadows[sh_no]->data);
   tl_assert(ptr < lc_shadows[sh_no]->data
                   + lc_shadows[sh_no]->szB
                   + (lc_shadows[sh_no]->szB==0  ? 1  : 0));

   if (lc_markstack[sh_no].state == Unreached) {
      if (0)
         VG_(printf)("pushing %#lx-%#lx\n", lc_shadows[sh_no]->data,
                     lc_shadows[sh_no]->data + lc_shadows[sh_no]->szB);

      tl_assert(lc_markstack[sh_no].next == -1);
      lc_markstack[sh_no].next = lc_markstack_top;
      lc_markstack_top = sh_no;
   }

   tl_assert(clique >= -1 && clique < lc_n_shadows);

   if (clique != -1) {
      if (0)
         VG_(printf)("mopup: %d: %#lx is %d\n",
                     sh_no, lc_shadows[sh_no]->data, lc_markstack[sh_no].state);

      /* An unmarked block - add it to the clique.  Add its size to
         the clique-leader's indirect size.  If the new block was
         itself a clique leader, it isn't any more, so add its
         indirect to the new clique leader.

         If this block *is* the clique leader, it means this is a
         cyclic structure, so none of this applies. */
      if (lc_markstack[sh_no].state == Unreached) {
         lc_markstack[sh_no].state = IndirectLeak;

         if (sh_no != clique) {
            if (VG_DEBUG_CLIQUE) {
               if (lc_markstack[sh_no].indirect)
                  VG_(printf)("  clique %d joining clique %d adding %lu+%lu bytes\n",
                              sh_no, clique,
                              lc_shadows[sh_no]->szB + 0UL,
                              lc_markstack[sh_no].indirect);
               else
                  VG_(printf)("  %d joining %d adding %lu\n",
                              sh_no, clique,
                              lc_shadows[sh_no]->szB + 0UL);
            }

            lc_markstack[clique].indirect += lc_shadows[sh_no]->szB;
            lc_markstack[clique].indirect += lc_markstack[sh_no].indirect;
            lc_markstack[sh_no].indirect = 0; /* shouldn't matter */
         }
      }
   } else if (ptr == lc_shadows[sh_no]->data) {
      lc_markstack[sh_no].state = Proper;
   } else {
      if (lc_markstack[sh_no].state == Unreached)
         lc_markstack[sh_no].state = Interior;
   }
}
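
/* Note, summarising the transitions above: with clique == -1, a
   pointer to a block's start marks it Proper, and an interior pointer
   promotes an Unreached block to Interior ("possibly lost").  With
   clique != -1, any Unreached block found becomes IndirectLeak, and
   its size, plus whatever indirect bytes it had accumulated as a
   former clique leader, is credited to the current leader's
   'indirect' total.  A block reaching itself (sh_no == clique)
   indicates a cycle and adds nothing. */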

static void lc_markstack_push(Addr ptr)
{
   lc_markstack_push_WRK(ptr, -1);
}

/* Return the top of the mark stack, if any. */
static Int lc_markstack_pop(void)
{
   Int ret = lc_markstack_top;

   if (ret != -1) {
      lc_markstack_top = lc_markstack[ret].next;
      lc_markstack[ret].next = -1;
   }

   return ret;
}


/* Scan a block of memory between [start, start+len).  This range may
   be bogus, inaccessible, or otherwise strange; we deal with it.

   If clique != -1, it means we're gathering leaked memory into
   cliques, and clique is the index of the current clique leader. */
static void lc_scan_memory_WRK(Addr start, SizeT len, Int clique)
{
   Addr ptr = VG_ROUNDUP(start, sizeof(Addr));
   Addr end = VG_ROUNDDN(start+len, sizeof(Addr));
   vki_sigset_t sigmask;

   if (VG_DEBUG_LEAKCHECK)
      VG_(printf)("scan %#lx-%#lx\n", start, start+len);
   VG_(sigprocmask)(VKI_SIG_SETMASK, NULL, &sigmask);
   VG_(set_fault_catcher)(scan_all_valid_memory_catcher);

   //   lc_scanned += end-ptr;

   if (!VG_(am_is_valid_for_client)(ptr, sizeof(Addr), VKI_PROT_READ))
      ptr = VG_PGROUNDUP(ptr+1);        /* first page bad */

   while (ptr < end) {
      Addr addr;

      /* Skip invalid chunks */
      if (!(*lc_is_within_valid_secondary)(ptr)) {
         ptr = VG_ROUNDUP(ptr+1, SM_SIZE);
         continue;
      }

      /* Look to see if this page seems reasonable */
      if ((ptr % VKI_PAGE_SIZE) == 0) {
         if (!VG_(am_is_valid_for_client)(ptr, sizeof(Addr), VKI_PROT_READ))
            ptr += VKI_PAGE_SIZE; /* bad page - skip it */
      }

      if (__builtin_setjmp(memscan_jmpbuf) == 0) {
         if ((*lc_is_valid_aligned_word)(ptr)) {
            lc_scanned += sizeof(Addr);
            addr = *(Addr *)ptr;
            lc_markstack_push_WRK(addr, clique);
         } else if (0 && VG_DEBUG_LEAKCHECK)
            VG_(printf)("%#lx not valid\n", ptr);
         ptr += sizeof(Addr);
      } else {
         /* We need to restore the signal mask, because we were
            longjmped out of a signal handler. */
         VG_(sigprocmask)(VKI_SIG_SETMASK, &sigmask, NULL);

         ptr = VG_PGROUNDUP(ptr+1);     /* bad page - skip it */
      }
   }

   VG_(sigprocmask)(VKI_SIG_SETMASK, &sigmask, NULL);
   VG_(set_fault_catcher)(NULL);
}
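
/* Note: the scan touches only Addr-aligned words, hence the
   VG_ROUNDUP/VG_ROUNDDN on entry; e.g. with 8-byte words a request
   for [0x1003,0x1013) examines just the word at 0x1008.  Unreadable
   memory is skipped at increasing granularity: a whole secondary-map
   chunk (SM_SIZE) when the shadow lookup fails, a whole page when
   aspacem reports it unreadable, and a page again if a read faults
   regardless and we longjmp out of the signal handler. */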


static void lc_scan_memory(Addr start, SizeT len)
{
   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_DebugMsg, "  Scanning segment: %#lx..%#lx (%ld)",
                   start, start+len-1, len);
   }
   lc_scan_memory_WRK(start, len, -1);
}

/* Process the mark stack until empty.  If clique != -1, we're
   gathering leaked blocks into cliques, so anything newly reached
   from here is marked IndirectLeak and accounted to that clique
   leader. */
static void lc_do_leakcheck(Int clique)
{
   Int top;

   while((top = lc_markstack_pop()) != -1) {
      tl_assert(top >= 0 && top < lc_n_shadows);
      tl_assert(lc_markstack[top].state != Unreached);

      lc_scan_memory_WRK(lc_shadows[top]->data, lc_shadows[top]->szB, clique);
   }
}

static void full_report(ThreadId tid)
{
   Int i;
   Int    n_lossrecords;
   LossRecord* errlist;
   LossRecord* p;
   Bool   is_suppressed;

   /* Go through and group lost structures into cliques.  For each
      Unreached block, push it onto the mark stack, and find all the
      blocks linked to it.  These are marked IndirectLeak, and their
      size is added to the clique leader's indirect size.  If one of
      the found blocks was itself a clique leader (from a previous
      pass), then the cliques are merged. */
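
   /* Worked example, illustrative only: suppose block A holds the
      only pointer to B, and B the only pointer to C, and nothing
      reachable from the roots points at A.  When the loop below meets
      A it is still Unreached, so A becomes a clique leader; scanning
      from A marks B and C IndirectLeak and accumulates szB(B)+szB(C)
      into A's 'indirect' field.  A is then reported as definitely
      lost, with B's and C's bytes shown as indirectly lost.  A cycle
      A <-> B behaves the same way: whichever block the loop meets
      first becomes the leader and the other is counted as indirect. */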
   for (i = 0; i < lc_n_shadows; i++) {
      if (VG_DEBUG_CLIQUE)
         VG_(printf)("cliques: %d at %#lx -> Loss state %d\n",
                     i, lc_shadows[i]->data, lc_markstack[i].state);
      if (lc_markstack[i].state != Unreached)
         continue;

      tl_assert(lc_markstack_top == -1);

      if (VG_DEBUG_CLIQUE)
         VG_(printf)("%d: gathering clique %#lx\n", i, lc_shadows[i]->data);

      lc_markstack_push_WRK(lc_shadows[i]->data, i);

      lc_do_leakcheck(i);

      tl_assert(lc_markstack_top == -1);
      tl_assert(lc_markstack[i].state == IndirectLeak
                /* jrs 20051218: Ashley Pittman supplied a
                   custom-allocator test program which causes the ==
                   IndirectLeak condition to fail - it causes .state
                   to be Unreached.  Since I have no idea how this
                   clique stuff works and no time to figure it out,
                   just allow that condition too.  This could well be
                   a completely bogus fix.  It doesn't seem unsafe
                   given that in any case the .state field is
                   immediately overwritten by the next statement. */
                || lc_markstack[i].state == Unreached);

      lc_markstack[i].state = Unreached; /* Return to the Unreached state,
                                            to indicate that it's a clique
                                            leader */
   }

   /* Common up the lost blocks so we can print sensible error messages. */
   n_lossrecords = 0;
   errlist       = NULL;
   for (i = 0; i < lc_n_shadows; i++) {
      ExeContext* where = lc_shadows[i]->where;

      for (p = errlist; p != NULL; p = p->next) {
         if (p->loss_mode == lc_markstack[i].state
             && VG_(eq_ExeContext) ( MC_(clo_leak_resolution),
                                     p->allocated_at,
                                     where) ) {
            break;
         }
      }
      if (p != NULL) {
         p->num_blocks  ++;
         p->total_bytes += lc_shadows[i]->szB;
         p->indirect_bytes += lc_markstack[i].indirect;
      } else {
         n_lossrecords ++;
         p = VG_(malloc)( "mc.fr.1", sizeof(LossRecord));
         p->loss_mode    = lc_markstack[i].state;
         p->allocated_at = where;
         p->total_bytes  = lc_shadows[i]->szB;
         p->indirect_bytes = lc_markstack[i].indirect;
         p->num_blocks   = 1;
         p->next         = errlist;
         errlist         = p;
      }
   }

   /* Print out the commoned-up blocks and collect summary stats.
      Records are emitted smallest first, so compare against the same
      direct-plus-indirect total that n_min records. */
   for (i = 0; i < n_lossrecords; i++) {
      Bool        print_record;
      LossRecord* p_min = NULL;
      SizeT       n_min = ~(0x0L);
      for (p = errlist; p != NULL; p = p->next) {
         if (p->num_blocks > 0
             && p->total_bytes + p->indirect_bytes < n_min) {
            n_min = p->total_bytes + p->indirect_bytes;
            p_min = p;
         }
      }
      tl_assert(p_min != NULL);

      /* Ok to have tst==NULL;  it's only used if --gdb-attach=yes, and
         we disallow that when --leak-check=yes.

         Prints the error if not suppressed, unless it's reachable (Proper
         or IndirectLeak) and --show-reachable=no */

      print_record = ( MC_(clo_show_reachable) ||
                       Unreached == p_min->loss_mode ||
                       Interior == p_min->loss_mode );

      // Nb: because VG_(unique_error) does all the error processing
      // immediately, and doesn't save the error, leakExtra can be
      // stack-allocated.
      is_suppressed =
         MC_(record_leak_error) ( tid, i+1, n_lossrecords, p_min,
                                  print_record );

      if (is_suppressed) {
         MC_(blocks_suppressed) += p_min->num_blocks;
         MC_(bytes_suppressed)  += p_min->total_bytes;

      } else if (Unreached == p_min->loss_mode) {
         MC_(blocks_leaked) += p_min->num_blocks;
         MC_(bytes_leaked)  += p_min->total_bytes;

      } else if (IndirectLeak == p_min->loss_mode) {
         MC_(blocks_indirect) += p_min->num_blocks;
         MC_(bytes_indirect)  += p_min->total_bytes;

      } else if (Interior == p_min->loss_mode) {
         MC_(blocks_dubious) += p_min->num_blocks;
         MC_(bytes_dubious)  += p_min->total_bytes;

      } else if (Proper == p_min->loss_mode) {
         MC_(blocks_reachable) += p_min->num_blocks;
         MC_(bytes_reachable)  += p_min->total_bytes;

      } else {
         VG_(tool_panic)("generic_detect_memory_leaks: unknown loss mode");
      }
      p_min->num_blocks = 0;
   }
}

/* Compute a quick summary of the leak check. */
static void make_summary(void)
{
   Int i;

   for(i = 0; i < lc_n_shadows; i++) {
      SizeT size = lc_shadows[i]->szB;

      switch(lc_markstack[i].state) {
      case Unreached:
         MC_(blocks_leaked)++;
         MC_(bytes_leaked) += size;
         break;

      case Proper:
         MC_(blocks_reachable)++;
         MC_(bytes_reachable) += size;
         break;

      case Interior:
         MC_(blocks_dubious)++;
         MC_(bytes_dubious) += size;
         break;

      case IndirectLeak:   /* shouldn't happen */
         MC_(blocks_indirect)++;
         MC_(bytes_indirect) += size;
         break;
      }
   }
}

static MC_Chunk**
find_active_shadows(UInt* n_shadows)
{
   /* Our goal is to construct a set of shadows that includes every
    * mempool chunk, and every malloc region that *doesn't* contain a
    * mempool chunk. We do this in several phases.
    *
    * First we collect all the malloc chunks into an array and sort it.
    * We do this because we want to query the chunks by interior
    * pointers, requiring binary search.
    *
    * Second we build an array containing a Bool for each malloc chunk,
    * indicating whether it contains any mempools.
    *
    * Third we loop over the mempool tables. For each chunk in each
    * pool, we set the entry in the Bool array corresponding to the
    * malloc chunk containing the mempool chunk.
    *
    * Finally we copy the mempool chunks and the non-marked malloc
    * chunks into a combined array of shadows, free our temporaries,
    * and return the combined array.
    */

   MC_Mempool *mp;
   MC_Chunk **mallocs, **shadows, *mc;
   UInt n_mallocs, m, s;
   Bool *malloc_chunk_holds_a_pool_chunk;

   mallocs = (MC_Chunk**) VG_(HT_to_array)( MC_(malloc_list), &n_mallocs );

   if (n_mallocs == 0) {
      tl_assert(mallocs == NULL);
      *n_shadows = 0;
      return NULL;
   }

   VG_(ssort)((void*)mallocs, n_mallocs,
              sizeof(VgHashNode*), lc_compar);

   malloc_chunk_holds_a_pool_chunk = VG_(calloc)( "mc.fas.1",
                                                  n_mallocs, sizeof(Bool) );

   *n_shadows = n_mallocs;

   VG_(HT_ResetIter)(MC_(mempool_list));
   while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
      VG_(HT_ResetIter)(mp->chunks);
      while ( (mc = VG_(HT_Next)(mp->chunks)) ) {

         /* We'll need a shadow for this chunk. */
         ++(*n_shadows);

         /* Possibly invalidate the malloc holding the beginning of
            this chunk. */
         m = find_shadow_for(mc->data, mallocs, n_mallocs);
         if (m != -1 && malloc_chunk_holds_a_pool_chunk[m] == False) {
            tl_assert(*n_shadows > 0);
            --(*n_shadows);
            malloc_chunk_holds_a_pool_chunk[m] = True;
         }

         /* Possibly invalidate the malloc holding the end of this chunk. */
         if (mc->szB > 1) {
            m = find_shadow_for(mc->data + (mc->szB - 1), mallocs, n_mallocs);
            if (m != -1 && malloc_chunk_holds_a_pool_chunk[m] == False) {
               tl_assert(*n_shadows > 0);
               --(*n_shadows);
               malloc_chunk_holds_a_pool_chunk[m] = True;
            }
         }
      }
   }

   tl_assert(*n_shadows > 0);
   shadows = VG_(malloc)("mc.fas.2", sizeof(VgHashNode*) * (*n_shadows));
   s = 0;

   /* Copy the mempool chunks into the final array. */
   VG_(HT_ResetIter)(MC_(mempool_list));
   while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
      VG_(HT_ResetIter)(mp->chunks);
      while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
         tl_assert(s < *n_shadows);
         shadows[s++] = mc;
      }
   }

   /* Copy the malloc chunks into the final array. */
   for (m = 0; m < n_mallocs; ++m) {
      if (!malloc_chunk_holds_a_pool_chunk[m]) {
         tl_assert(s < *n_shadows);
         shadows[s++] = mallocs[m];
      }
   }

   tl_assert(s == *n_shadows);
   VG_(free)(mallocs);
   VG_(free)(malloc_chunk_holds_a_pool_chunk);

   return shadows;
}
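
/* Worked example, illustrative only: suppose the client malloc'd a
   64-byte block M and declared a mempool whose chunks P1 and P2 both
   live inside M.  The third phase finds M via find_shadow_for() on
   P1's start (and on its last byte), sets M's entry in
   malloc_chunk_holds_a_pool_chunk, and drops M from the shadow count;
   the returned array then contains P1 and P2 but not M, so pool
   memory is leak-checked at chunk granularity rather than as one
   opaque superblock.  (Note: 'm' is a UInt, so the 'm != -1' tests
   above compare m against UINT_MAX, which is also the value produced
   when find_shadow_for()'s Int -1 is assigned to m; the tests behave
   as intended.) */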


/* Top level entry point to leak detector.  Call here, passing in
   suitable address-validating functions (see the address-space
   scanning code at the top of this file).  These functions used to
   encapsulate the differences between Memcheck and Addrcheck; they no
   longer do but it doesn't hurt to keep them here.
*/
void MC_(do_detect_memory_leaks) (
   ThreadId tid, LeakCheckMode mode,
   Bool (*is_within_valid_secondary) ( Addr ),
   Bool (*is_valid_aligned_word)     ( Addr )
)
{
   Int i;

   tl_assert(mode != LC_Off);

   lc_shadows = find_active_shadows(&lc_n_shadows);

   /* Sort the array. */
   VG_(ssort)((void*)lc_shadows, lc_n_shadows, sizeof(VgHashNode*), lc_compar);

   /* Sanity check; assert that the blocks are now in order. */
   for (i = 0; i < lc_n_shadows-1; i++) {
      tl_assert( lc_shadows[i]->data <= lc_shadows[i+1]->data);
   }

   /* Sanity check -- make sure they don't overlap.  But do allow
      exact duplicates.  If this assertion fails, it may mean that the
      application has done something stupid with
      VALGRIND_MALLOCLIKE_BLOCK client requests, specifically, has
      made overlapping requests (which are nonsensical).  Another way
      to screw up is to use VALGRIND_MALLOCLIKE_BLOCK for stack
      locations; again nonsensical. */
   for (i = 0; i < lc_n_shadows-1; i++) {
      Bool nonsense_overlap = ! (
            /* normal case - no overlap */
            (lc_shadows[i]->data + lc_shadows[i]->szB <= lc_shadows[i+1]->data)
         ||
            /* degenerate case: exact duplicates */
              (lc_shadows[i]->data == lc_shadows[i+1]->data
            && lc_shadows[i]->szB == lc_shadows[i+1]->szB)
         );
      if (nonsense_overlap) {
         VG_(message)(Vg_UserMsg, "Block [0x%lx, 0x%lx) overlaps with block [0x%lx, 0x%lx)",
                      lc_shadows[   i]->data, (lc_shadows[   i]->data + lc_shadows[   i]->szB),
                      lc_shadows[1+ i]->data, (lc_shadows[1+ i]->data + lc_shadows[1+ i]->szB) );
      }
      tl_assert (!nonsense_overlap);
   }

   if (lc_n_shadows == 0) {
      tl_assert(lc_shadows == NULL);
      if (VG_(clo_verbosity) >= 1 && !VG_(clo_xml)) {
         VG_(message)(Vg_UserMsg,
                      "All heap blocks were freed -- no leaks are possible.");
      }
      return;
   }

   if (VG_(clo_verbosity) > 0 && !VG_(clo_xml))
      VG_(message)(Vg_UserMsg,
                   "searching for pointers to %'d not-freed blocks.",
                   lc_n_shadows );

   lc_min_mallocd_addr = lc_shadows[0]->data;
   lc_max_mallocd_addr = lc_shadows[lc_n_shadows-1]->data
                         + lc_shadows[lc_n_shadows-1]->szB;

   lc_markstack = VG_(malloc)( "mc.ddml.1",
                               lc_n_shadows * sizeof(*lc_markstack) );
   for (i = 0; i < lc_n_shadows; i++) {
      lc_markstack[i].next = -1;
      lc_markstack[i].state = Unreached;
      lc_markstack[i].indirect = 0;
   }
   lc_markstack_top = -1;

   lc_is_within_valid_secondary = is_within_valid_secondary;
   lc_is_valid_aligned_word     = is_valid_aligned_word;

   lc_scanned = 0;

   /* Push roots onto the mark stack.  Roots are:
      - the integer registers of all threads
      - all mappings belonging to the client, including stacks
      - .. but excluding any client heap segments.
      Client heap segments are excluded because we wish to differentiate
      client heap blocks which are referenced only from inside the heap
      from those outside.  This facilitates the indirect vs direct loss
      categorisation, which [if the users ever manage to understand it]
      is really useful for detecting lost cycles.
   */
   { Addr*     seg_starts;
     Int       n_seg_starts;
     seg_starts = get_seg_starts( &n_seg_starts );
     tl_assert(seg_starts && n_seg_starts > 0);
     /* VG_(am_show_nsegments)( 0,"leakcheck"); */
     for (i = 0; i < n_seg_starts; i++) {
        NSegment const* seg = VG_(am_find_nsegment)( seg_starts[i] );
        tl_assert(seg);
        if (seg->kind != SkFileC && seg->kind != SkAnonC)
           continue;
        if (!(seg->hasR && seg->hasW))
           continue;
        if (seg->isCH)
           continue;

        /* Don't poke around in device segments as this may cause
           hangs.  Exclude /dev/zero just in case someone allocated
           memory by explicitly mapping /dev/zero. */
        if (seg->kind == SkFileC
            && (VKI_S_ISCHR(seg->mode) || VKI_S_ISBLK(seg->mode))) {
           HChar* dev_name = VG_(am_get_filename)( (NSegment*)seg );
           if (dev_name && 0 == VG_(strcmp)(dev_name, "/dev/zero")) {
              /* don't skip /dev/zero */
           } else {
              /* skip this device mapping */
              continue;
           }
        }

        if (0)
           VG_(printf)("ACCEPT %2d  %#lx %#lx\n", i, seg->start, seg->end);
        lc_scan_memory(seg->start, seg->end+1 - seg->start);
     }
   }

   /* Push registers onto mark stack */
   VG_(apply_to_GP_regs)(lc_markstack_push);

   /* Keep walking the heap until everything is found */
   lc_do_leakcheck(-1);

   if (VG_(clo_verbosity) > 0 && !VG_(clo_xml))
      VG_(message)(Vg_UserMsg, "checked %'lu bytes.", lc_scanned);

   MC_(blocks_leaked)     = MC_(bytes_leaked)     = 0;
   MC_(blocks_indirect)   = MC_(bytes_indirect)   = 0;
   MC_(blocks_dubious)    = MC_(bytes_dubious)    = 0;
   MC_(blocks_reachable)  = MC_(bytes_reachable)  = 0;
   MC_(blocks_suppressed) = MC_(bytes_suppressed) = 0;

   if (mode == LC_Full)
      full_report(tid);
   else
      make_summary();

   if (VG_(clo_verbosity) > 0 && !VG_(clo_xml)) {
      VG_(message)(Vg_UserMsg, "");
      VG_(message)(Vg_UserMsg, "LEAK SUMMARY:");
      VG_(message)(Vg_UserMsg, "   definitely lost: %'lu bytes in %'lu blocks.",
                               MC_(bytes_leaked), MC_(blocks_leaked) );
      if (MC_(blocks_indirect) > 0)
         VG_(message)(Vg_UserMsg, "   indirectly lost: %'lu bytes in %'lu blocks.",
                      MC_(bytes_indirect), MC_(blocks_indirect) );
      VG_(message)(Vg_UserMsg, "     possibly lost: %'lu bytes in %'lu blocks.",
                               MC_(bytes_dubious), MC_(blocks_dubious) );
      VG_(message)(Vg_UserMsg, "   still reachable: %'lu bytes in %'lu blocks.",
                               MC_(bytes_reachable), MC_(blocks_reachable) );
      VG_(message)(Vg_UserMsg, "        suppressed: %'lu bytes in %'lu blocks.",
                               MC_(bytes_suppressed), MC_(blocks_suppressed) );
      if (mode == LC_Summary
          && (MC_(blocks_leaked) + MC_(blocks_indirect)
              + MC_(blocks_dubious) + MC_(blocks_reachable)) > 0) {
         VG_(message)(Vg_UserMsg,
                      "Rerun with --leak-check=full to see details of leaked memory.");
      }
      if (MC_(blocks_reachable) > 0 && !MC_(clo_show_reachable) && mode == LC_Full) {
         VG_(message)(Vg_UserMsg,
           "Reachable blocks (those to which a pointer was found) are not shown.");
         VG_(message)(Vg_UserMsg,
            "To see them, rerun with: --leak-check=full --show-reachable=yes");
      }
   }

   VG_(free) ( lc_shadows );
   VG_(free) ( lc_markstack );
}

/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/