
/*--------------------------------------------------------------------*/
/*--- Management, printing, etc, of errors and suppressions.       ---*/
/*---                                                  mc_errors.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors.

   Copyright (C) 2000-2012 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "pub_tool_basics.h"
#include "pub_tool_gdbserver.h"
#include "pub_tool_poolalloc.h"     // For mc_include.h
#include "pub_tool_hashtable.h"     // For mc_include.h
#include "pub_tool_libcbase.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_machine.h"
#include "pub_tool_mallocfree.h"
#include "pub_tool_options.h"
#include "pub_tool_replacemalloc.h"
#include "pub_tool_tooliface.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_debuginfo.h"     // VG_(get_dataname_and_offset)
#include "pub_tool_xarray.h"

#include "mc_include.h"


/*------------------------------------------------------------*/
/*--- Error types                                          ---*/
/*------------------------------------------------------------*/

/* See comment in mc_include.h */
Bool MC_(any_value_errors) = False;


// Different kinds of blocks.
typedef enum {
   Block_Mallocd = 111,
   Block_Freed,
   Block_Mempool,
   Block_MempoolChunk,
   Block_UserG
} BlockKind;

/* ------------------ Addresses -------------------- */

/* The classification of a faulting address. */
typedef
   enum {
      Addr_Undescribed, // as-yet unclassified
      Addr_Unknown,     // classification yielded nothing useful
      Addr_Block,       // in malloc'd/free'd block
      Addr_Stack,       // on a thread's stack
      Addr_DataSym,     // in a global data sym
      Addr_Variable,    // variable described by the debug info
      Addr_SectKind     // last-ditch classification attempt
   }
   AddrTag;

typedef
   struct _AddrInfo
   AddrInfo;

struct _AddrInfo {
   AddrTag tag;
   union {
      // As-yet unclassified.
      struct { } Undescribed;

      // On a stack.
      struct {
         ThreadId tid;        // Which thread's stack?
      } Stack;

      // This covers heap blocks (normal and from mempools) and user-defined
      // blocks.
      struct {
         BlockKind   block_kind;
         Char*       block_desc;    // "block", "mempool" or user-defined
         SizeT       block_szB;
         PtrdiffT    rwoffset;
         ExeContext* lastchange;
      } Block;

      // In a global .data symbol.  This holds the first 127 chars of
      // the variable's name (zero terminated), plus a (memory) offset.
      struct {
         Char     name[128];
         PtrdiffT offset;
      } DataSym;

      // Is described by Dwarf debug info.  XArray*s of HChar.
      struct {
         XArray* /* of HChar */ descr1;
         XArray* /* of HChar */ descr2;
      } Variable;

      // Could only narrow it down to be the PLT/GOT/etc of a given
      // object.  Better than nothing, perhaps.
      struct {
         Char       objname[128];
         VgSectKind kind;
      } SectKind;

      // Classification yielded nothing useful.
      struct { } Unknown;

   } Addr;
};

/* ------------------ Errors ----------------------- */

/* What kind of error it is. */
typedef
   enum {
      Err_Value,
      Err_Cond,
      Err_CoreMem,
      Err_Addr,
      Err_Jump,
      Err_RegParam,
      Err_MemParam,
      Err_User,
      Err_Free,
      Err_FreeMismatch,
      Err_Overlap,
      Err_Leak,
      Err_IllegalMempool,
   }
   MC_ErrorTag;


typedef struct _MC_Error MC_Error;

struct _MC_Error {
   // Nb: we don't need the tag here, as it's stored in the Error type! Yuk.
   //MC_ErrorTag tag;

   union {
      // Use of an undefined value:
      // - as a pointer in a load or store
      // - as a jump target
      struct {
         SizeT szB;   // size of value in bytes
         // Origin info
         UInt        otag;      // origin tag
         ExeContext* origin_ec; // filled in later
      } Value;

      // Use of an undefined value in a conditional branch or move.
      struct {
         // Origin info
         UInt        otag;      // origin tag
         ExeContext* origin_ec; // filled in later
      } Cond;

      // Addressability error in core (signal-handling) operation.
      // It would be good to get rid of this error kind, merge it with
      // another one somehow.
      struct {
      } CoreMem;

      // Use of an unaddressable memory location in a load or store.
      struct {
         Bool     isWrite;    // read or write?
         SizeT    szB;        // not used for exec (jump) errors
         Bool     maybe_gcc;  // True if just below %esp -- could be a gcc bug
         AddrInfo ai;
      } Addr;

      // Jump to an unaddressable memory location.
      struct {
         AddrInfo ai;
      } Jump;

      // System call register input contains undefined bytes.
      struct {
         // Origin info
         UInt        otag;      // origin tag
         ExeContext* origin_ec; // filled in later
      } RegParam;

      // System call memory input contains undefined/unaddressable bytes
      struct {
         Bool     isAddrErr;  // Addressability or definedness error?
         AddrInfo ai;
         // Origin info
         UInt        otag;      // origin tag
         ExeContext* origin_ec; // filled in later
      } MemParam;

      // Problem found from a client request like CHECK_MEM_IS_ADDRESSABLE.
      struct {
         Bool     isAddrErr;  // Addressability or definedness error?
         AddrInfo ai;
         // Origin info
         UInt        otag;      // origin tag
         ExeContext* origin_ec; // filled in later
      } User;

      // Program tried to free() something that's not a heap block (this
      // covers double-frees).
      struct {
         AddrInfo ai;
      } Free;

      // Program allocates a heap block with one function
      // (malloc/new/new[]/custom) and deallocates it with a non-matching one.
      struct {
         AddrInfo ai;
      } FreeMismatch;

      // Call to strcpy, memcpy, etc, with overlapping blocks.
      struct {
         Addr src;   // Source block
         Addr dst;   // Destination block
         Int  szB;   // Size in bytes;  0 if unused.
      } Overlap;

      // A memory leak.
      struct {
         UInt        n_this_record;
         UInt        n_total_records;
         LossRecord* lr;
      } Leak;

      // A memory pool error.
      struct {
         AddrInfo ai;
      } IllegalMempool;

   } Err;
};


/*------------------------------------------------------------*/
/*--- Printing errors                                      ---*/
/*------------------------------------------------------------*/

/* This is the "this error is due to be printed shortly; so have a
   look at it and print any preamble you want" function.  Which, in
   Memcheck, we don't use.  Hence a no-op.
*/
void MC_(before_pp_Error) ( Error* err ) {
}

/* Do a printf-style operation on either the XML or normal output
   channel, depending on the setting of VG_(clo_xml).
*/
static void emit_WRK ( HChar* format, va_list vargs )
{
   if (VG_(clo_xml)) {
      VG_(vprintf_xml)(format, vargs);
   } else {
      VG_(vmessage)(Vg_UserMsg, format, vargs);
   }
}
static void emit ( HChar* format, ... ) PRINTF_CHECK(1, 2);
static void emit ( HChar* format, ... )
{
   va_list vargs;
   va_start(vargs, format);
   emit_WRK(format, vargs);
   va_end(vargs);
}
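/* Like 'emit', but without the compile-time format check; its call
   sites use Valgrind's %pS conversion, which PRINTF_CHECK cannot
   validate. */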
static void emiN ( HChar* format, ... ) /* NO FORMAT CHECK */
{
   va_list vargs;
   va_start(vargs, format);
   emit_WRK(format, vargs);
   va_end(vargs);
}


static void mc_pp_AddrInfo ( Addr a, AddrInfo* ai, Bool maybe_gcc )
{
   HChar* xpre  = VG_(clo_xml) ? "  <auxwhat>" : " ";
   HChar* xpost = VG_(clo_xml) ? "</auxwhat>"  : "";

   switch (ai->tag) {
      case Addr_Unknown:
         if (maybe_gcc) {
            emit( "%sAddress 0x%llx is just below the stack ptr.  "
                  "To suppress, use: --workaround-gcc296-bugs=yes%s\n",
                  xpre, (ULong)a, xpost );
         } else {
            emit( "%sAddress 0x%llx "
                  "is not stack'd, malloc'd or (recently) free'd%s\n",
                  xpre, (ULong)a, xpost );
         }
         break;

      case Addr_Stack:
         emit( "%sAddress 0x%llx is on thread %d's stack%s\n",
               xpre, (ULong)a, ai->Addr.Stack.tid, xpost );
         break;

      case Addr_Block: {
         SizeT    block_szB = ai->Addr.Block.block_szB;
         PtrdiffT rwoffset  = ai->Addr.Block.rwoffset;
         SizeT    delta;
         const    Char* relative;

         if (rwoffset < 0) {
            delta    = (SizeT)(-rwoffset);
            relative = "before";
         } else if (rwoffset >= block_szB) {
            delta    = rwoffset - block_szB;
            relative = "after";
         } else {
            delta    = rwoffset;
            relative = "inside";
         }
         emit(
            "%sAddress 0x%lx is %'lu bytes %s a %s of size %'lu %s%s\n",
            xpre,
            a, delta, relative, ai->Addr.Block.block_desc,
            block_szB,
            ai->Addr.Block.block_kind==Block_Mallocd ? "alloc'd"
            : ai->Addr.Block.block_kind==Block_Freed ? "free'd"
                                                     : "client-defined",
            xpost
         );
         VG_(pp_ExeContext)(ai->Addr.Block.lastchange);
         break;
      }

      case Addr_DataSym:
         emiN( "%sAddress 0x%llx is %llu bytes "
               "inside data symbol \"%pS\"%s\n",
               xpre,
               (ULong)a,
               (ULong)ai->Addr.DataSym.offset,
               ai->Addr.DataSym.name,
               xpost );
         break;

      case Addr_Variable:
         /* Note, no need for XML tags here, because descr1/2 will
            already have <auxwhat> or <xauxwhat>s on them, in XML
            mode. */
         if (ai->Addr.Variable.descr1)
            emit( "%s%s\n",
                  VG_(clo_xml) ? "  " : " ",
                  (HChar*)VG_(indexXA)(ai->Addr.Variable.descr1, 0) );
         if (ai->Addr.Variable.descr2)
            emit( "%s%s\n",
                  VG_(clo_xml) ? "  " : " ",
                  (HChar*)VG_(indexXA)(ai->Addr.Variable.descr2, 0) );
         break;

      case Addr_SectKind:
         emiN( "%sAddress 0x%llx is in the %pS segment of %pS%s\n",
               xpre,
               (ULong)a,
               VG_(pp_SectKind)(ai->Addr.SectKind.kind),
               ai->Addr.SectKind.objname,
               xpost );
         break;

      default:
         VG_(tool_panic)("mc_pp_AddrInfo");
   }
}

static const HChar* str_leak_lossmode ( Reachedness lossmode )
{
   const HChar *loss = "?";
   switch (lossmode) {
      case Unreached:    loss = "definitely lost"; break;
      case IndirectLeak: loss = "indirectly lost"; break;
      case Possible:     loss = "possibly lost"; break;
      case Reachable:    loss = "still reachable"; break;
   }
   return loss;
}

static const HChar* xml_leak_kind ( Reachedness lossmode )
{
   const HChar *loss = "?";
   switch (lossmode) {
      case Unreached:    loss = "Leak_DefinitelyLost"; break;
      case IndirectLeak: loss = "Leak_IndirectlyLost"; break;
      case Possible:     loss = "Leak_PossiblyLost"; break;
      case Reachable:    loss = "Leak_StillReachable"; break;
   }
   return loss;
}

static void mc_pp_origin ( ExeContext* ec, UInt okind )
{
   HChar* src = NULL;
   tl_assert(ec);

   switch (okind) {
      case MC_OKIND_STACK:   src = " by a stack allocation"; break;
      case MC_OKIND_HEAP:    src = " by a heap allocation"; break;
      case MC_OKIND_USER:    src = " by a client request"; break;
      case MC_OKIND_UNKNOWN: src = ""; break;
   }
   tl_assert(src); /* guards against invalid 'okind' */

   if (VG_(clo_xml)) {
      emit( "  <auxwhat>Uninitialised value was created%s</auxwhat>\n",
            src);
      VG_(pp_ExeContext)( ec );
   } else {
      emit( " Uninitialised value was created%s\n", src);
      VG_(pp_ExeContext)( ec );
   }
}

char * MC_(snprintf_delta) (char * buf, Int size,
                            SizeT current_val, SizeT old_val,
                            LeakCheckDeltaMode delta_mode)
{
   if (delta_mode == LCD_Any)
      buf[0] = '\0';
   else if (current_val >= old_val)
      VG_(snprintf) (buf, size, " (+%'lu)", current_val - old_val);
   else
      VG_(snprintf) (buf, size, " (-%'lu)", old_val - current_val);

   return buf;
}

static void pp_LossRecord(UInt n_this_record, UInt n_total_records,
                          LossRecord* lr, Bool xml)
{
   // char arrays to produce the indication of increase/decrease in case
   // of delta_mode != LCD_Any
   char        d_bytes[20];
   char        d_direct_bytes[20];
   char        d_indirect_bytes[20];
   char        d_num_blocks[20];

   MC_(snprintf_delta) (d_bytes, 20,
                        lr->szB + lr->indirect_szB,
                        lr->old_szB + lr->old_indirect_szB,
                        MC_(detect_memory_leaks_last_delta_mode));
   MC_(snprintf_delta) (d_direct_bytes, 20,
                        lr->szB,
                        lr->old_szB,
                        MC_(detect_memory_leaks_last_delta_mode));
   MC_(snprintf_delta) (d_indirect_bytes, 20,
                        lr->indirect_szB,
                        lr->old_indirect_szB,
                        MC_(detect_memory_leaks_last_delta_mode));
   MC_(snprintf_delta) (d_num_blocks, 20,
                        (SizeT) lr->num_blocks,
                        (SizeT) lr->old_num_blocks,
                        MC_(detect_memory_leaks_last_delta_mode));

   if (xml) {
      emit("  <kind>%s</kind>\n", xml_leak_kind(lr->key.state));
      if (lr->indirect_szB > 0) {
         emit( "  <xwhat>\n" );
         emit( "    <text>%'lu%s (%'lu%s direct, %'lu%s indirect) bytes "
               "in %'u%s blocks"
               " are %s in loss record %'u of %'u</text>\n",
               lr->szB + lr->indirect_szB, d_bytes,
               lr->szB, d_direct_bytes,
               lr->indirect_szB, d_indirect_bytes,
               lr->num_blocks, d_num_blocks,
               str_leak_lossmode(lr->key.state),
               n_this_record, n_total_records );
         // Nb: don't put commas in these XML numbers
         emit( "    <leakedbytes>%lu</leakedbytes>\n",
               lr->szB + lr->indirect_szB );
         emit( "    <leakedblocks>%u</leakedblocks>\n", lr->num_blocks );
         emit( "  </xwhat>\n" );
      } else {
         emit( "  <xwhat>\n" );
         emit( "    <text>%'lu%s bytes in %'u%s blocks"
               " are %s in loss record %'u of %'u</text>\n",
               lr->szB, d_direct_bytes,
               lr->num_blocks, d_num_blocks,
               str_leak_lossmode(lr->key.state),
               n_this_record, n_total_records );
         emit( "    <leakedbytes>%lu</leakedbytes>\n", lr->szB);
         emit( "    <leakedblocks>%u</leakedblocks>\n", lr->num_blocks);
         emit( "  </xwhat>\n" );
      }
      VG_(pp_ExeContext)(lr->key.allocated_at);
   } else { /* ! if (xml) */
      if (lr->indirect_szB > 0) {
         emit(
            "%'lu%s (%'lu%s direct, %'lu%s indirect) bytes in %'u%s blocks"
            " are %s in loss record %'u of %'u\n",
            lr->szB + lr->indirect_szB, d_bytes,
            lr->szB, d_direct_bytes,
            lr->indirect_szB, d_indirect_bytes,
            lr->num_blocks, d_num_blocks,
            str_leak_lossmode(lr->key.state),
            n_this_record, n_total_records
         );
      } else {
         emit(
            "%'lu%s bytes in %'u%s blocks are %s in loss record %'u of %'u\n",
            lr->szB, d_direct_bytes,
            lr->num_blocks, d_num_blocks,
            str_leak_lossmode(lr->key.state),
            n_this_record, n_total_records
         );
      }
      VG_(pp_ExeContext)(lr->key.allocated_at);
   } /* if (xml) */
}

void MC_(pp_LossRecord)(UInt n_this_record, UInt n_total_records,
                        LossRecord* l)
{
   pp_LossRecord (n_this_record, n_total_records, l, /* xml */ False);
}

void MC_(pp_Error) ( Error* err )
{
   const Bool xml  = VG_(clo_xml); /* a shorthand */
   MC_Error* extra = VG_(get_error_extra)(err);

   switch (VG_(get_error_kind)(err)) {
      case Err_CoreMem:
         /* What the hell *is* a CoreMemError? jrs 2005-May-18 */
         /* As of 2006-Dec-14, it's caused by unaddressable bytes in a
            signal handler frame.  --njn */
         // JRS 17 May 09: None of our regtests exercise this; hence AFAIK
         // the following code is untested.  Bad.
         if (xml) {
            emit( "  <kind>CoreMemError</kind>\n" );
            emiN( "  <what>%pS contains unaddressable byte(s)</what>\n",
                  VG_(get_error_string)(err));
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         } else {
            emit( "%s contains unaddressable byte(s)\n",
                  VG_(get_error_string)(err));
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         }
         break;

      case Err_Value:
         MC_(any_value_errors) = True;
         if (xml) {
            emit( "  <kind>UninitValue</kind>\n" );
            emit( "  <what>Use of uninitialised value of size %ld</what>\n",
                  extra->Err.Value.szB );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            if (extra->Err.Value.origin_ec)
               mc_pp_origin( extra->Err.Value.origin_ec,
                            extra->Err.Value.otag & 3 );
         } else {
            /* Could also show extra->Err.Cond.otag if debugging origin
               tracking */
            emit( "Use of uninitialised value of size %ld\n",
                  extra->Err.Value.szB );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            if (extra->Err.Value.origin_ec)
               mc_pp_origin( extra->Err.Value.origin_ec,
                            extra->Err.Value.otag & 3 );
         }
         break;

      case Err_Cond:
         MC_(any_value_errors) = True;
         if (xml) {
            emit( "  <kind>UninitCondition</kind>\n" );
            emit( "  <what>Conditional jump or move depends"
                  " on uninitialised value(s)</what>\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            if (extra->Err.Cond.origin_ec)
               mc_pp_origin( extra->Err.Cond.origin_ec,
                             extra->Err.Cond.otag & 3 );
         } else {
            /* Could also show extra->Err.Cond.otag if debugging origin
               tracking */
            emit( "Conditional jump or move depends"
                  " on uninitialised value(s)\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            if (extra->Err.Cond.origin_ec)
               mc_pp_origin( extra->Err.Cond.origin_ec,
                             extra->Err.Cond.otag & 3 );
         }
         break;

      case Err_RegParam:
         MC_(any_value_errors) = True;
         if (xml) {
            emit( "  <kind>SyscallParam</kind>\n" );
            emiN( "  <what>Syscall param %pS contains "
                  "uninitialised byte(s)</what>\n",
                  VG_(get_error_string)(err) );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            if (extra->Err.RegParam.origin_ec)
               mc_pp_origin( extra->Err.RegParam.origin_ec,
                             extra->Err.RegParam.otag & 3 );
         } else {
            emit( "Syscall param %s contains uninitialised byte(s)\n",
                  VG_(get_error_string)(err) );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            if (extra->Err.RegParam.origin_ec)
               mc_pp_origin( extra->Err.RegParam.origin_ec,
                             extra->Err.RegParam.otag & 3 );
         }
         break;

      case Err_MemParam:
         if (!extra->Err.MemParam.isAddrErr)
            MC_(any_value_errors) = True;
         if (xml) {
            emit( "  <kind>SyscallParam</kind>\n" );
            emiN( "  <what>Syscall param %pS points to %s byte(s)</what>\n",
                  VG_(get_error_string)(err),
                  extra->Err.MemParam.isAddrErr
                     ? "unaddressable" : "uninitialised" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            mc_pp_AddrInfo(VG_(get_error_address)(err),
                           &extra->Err.MemParam.ai, False);
            if (extra->Err.MemParam.origin_ec
                && !extra->Err.MemParam.isAddrErr)
               mc_pp_origin( extra->Err.MemParam.origin_ec,
                             extra->Err.MemParam.otag & 3 );
         } else {
            emit( "Syscall param %s points to %s byte(s)\n",
                  VG_(get_error_string)(err),
                  extra->Err.MemParam.isAddrErr
                     ? "unaddressable" : "uninitialised" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            mc_pp_AddrInfo(VG_(get_error_address)(err),
                           &extra->Err.MemParam.ai, False);
            if (extra->Err.MemParam.origin_ec
                && !extra->Err.MemParam.isAddrErr)
               mc_pp_origin( extra->Err.MemParam.origin_ec,
                             extra->Err.MemParam.otag & 3 );
         }
         break;

      case Err_User:
         if (!extra->Err.User.isAddrErr)
            MC_(any_value_errors) = True;
         if (xml) {
            emit( "  <kind>ClientCheck</kind>\n" );
            emit( "  <what>%s byte(s) found "
                  "during client check request</what>\n",
                   extra->Err.User.isAddrErr
                      ? "Unaddressable" : "Uninitialised" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            mc_pp_AddrInfo(VG_(get_error_address)(err), &extra->Err.User.ai,
                           False);
            if (extra->Err.User.origin_ec && !extra->Err.User.isAddrErr)
               mc_pp_origin( extra->Err.User.origin_ec,
                             extra->Err.User.otag & 3 );
         } else {
            emit( "%s byte(s) found during client check request\n",
                   extra->Err.User.isAddrErr
                      ? "Unaddressable" : "Uninitialised" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            mc_pp_AddrInfo(VG_(get_error_address)(err), &extra->Err.User.ai,
                           False);
            if (extra->Err.User.origin_ec && !extra->Err.User.isAddrErr)
               mc_pp_origin( extra->Err.User.origin_ec,
                             extra->Err.User.otag & 3 );
         }
         break;

      case Err_Free:
         if (xml) {
            emit( "  <kind>InvalidFree</kind>\n" );
            emit( "  <what>Invalid free() / delete / delete[]"
                  " / realloc()</what>\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            mc_pp_AddrInfo( VG_(get_error_address)(err),
                            &extra->Err.Free.ai, False );
         } else {
            emit( "Invalid free() / delete / delete[] / realloc()\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            mc_pp_AddrInfo( VG_(get_error_address)(err),
                            &extra->Err.Free.ai, False );
         }
         break;

      case Err_FreeMismatch:
         if (xml) {
            emit( "  <kind>MismatchedFree</kind>\n" );
            emit( "  <what>Mismatched free() / delete / delete []</what>\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            mc_pp_AddrInfo(VG_(get_error_address)(err),
                           &extra->Err.FreeMismatch.ai, False);
         } else {
            emit( "Mismatched free() / delete / delete []\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            mc_pp_AddrInfo(VG_(get_error_address)(err),
                           &extra->Err.FreeMismatch.ai, False);
         }
         break;

      case Err_Addr:
         if (xml) {
            emit( "  <kind>Invalid%s</kind>\n",
                  extra->Err.Addr.isWrite ? "Write" : "Read"  );
            emit( "  <what>Invalid %s of size %ld</what>\n",
                  extra->Err.Addr.isWrite ? "write" : "read",
                  extra->Err.Addr.szB );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            mc_pp_AddrInfo( VG_(get_error_address)(err),
                            &extra->Err.Addr.ai,
                            extra->Err.Addr.maybe_gcc );
         } else {
            emit( "Invalid %s of size %ld\n",
                  extra->Err.Addr.isWrite ? "write" : "read",
                  extra->Err.Addr.szB );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );

            mc_pp_AddrInfo( VG_(get_error_address)(err),
                            &extra->Err.Addr.ai,
                            extra->Err.Addr.maybe_gcc );
         }
         break;

      case Err_Jump:
         if (xml) {
            emit( "  <kind>InvalidJump</kind>\n" );
            emit( "  <what>Jump to the invalid address stated "
                  "on the next line</what>\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            mc_pp_AddrInfo( VG_(get_error_address)(err), &extra->Err.Jump.ai,
                            False );
         } else {
            emit( "Jump to the invalid address stated on the next line\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            mc_pp_AddrInfo( VG_(get_error_address)(err), &extra->Err.Jump.ai,
                            False );
         }
         break;

      case Err_Overlap:
         if (xml) {
            emit( "  <kind>Overlap</kind>\n" );
            if (extra->Err.Overlap.szB == 0) {
               emiN( "  <what>Source and destination overlap "
                     "in %pS(%#lx, %#lx)\n</what>\n",
                     VG_(get_error_string)(err),
                     extra->Err.Overlap.dst, extra->Err.Overlap.src );
            } else {
               emit( "  <what>Source and destination overlap "
                     "in %s(%#lx, %#lx, %d)</what>\n",
                     VG_(get_error_string)(err),
                     extra->Err.Overlap.dst, extra->Err.Overlap.src,
                     extra->Err.Overlap.szB );
            }
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         } else {
            if (extra->Err.Overlap.szB == 0) {
               emiN( "Source and destination overlap in %pS(%#lx, %#lx)\n",
                     VG_(get_error_string)(err),
                     extra->Err.Overlap.dst, extra->Err.Overlap.src );
            } else {
               emit( "Source and destination overlap in %s(%#lx, %#lx, %d)\n",
                     VG_(get_error_string)(err),
                     extra->Err.Overlap.dst, extra->Err.Overlap.src,
                     extra->Err.Overlap.szB );
            }
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         }
         break;

      case Err_IllegalMempool:
         // JRS 17 May 09: None of our regtests exercise this; hence AFAIK
         // the following code is untested.  Bad.
         if (xml) {
            emit( "  <kind>InvalidMemPool</kind>\n" );
            emit( "  <what>Illegal memory pool address</what>\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            mc_pp_AddrInfo( VG_(get_error_address)(err),
                            &extra->Err.IllegalMempool.ai, False );
         } else {
            emit( "Illegal memory pool address\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            mc_pp_AddrInfo( VG_(get_error_address)(err),
                            &extra->Err.IllegalMempool.ai, False );
         }
         break;

      case Err_Leak: {
         UInt        n_this_record   = extra->Err.Leak.n_this_record;
         UInt        n_total_records = extra->Err.Leak.n_total_records;
         LossRecord* lr              = extra->Err.Leak.lr;
         pp_LossRecord (n_this_record, n_total_records, lr, xml);
         break;
      }

      default:
         VG_(printf)("Error:\n  unknown Memcheck error code %d\n",
                     VG_(get_error_kind)(err));
         VG_(tool_panic)("unknown error code in mc_pp_Error");
   }
}

/*------------------------------------------------------------*/
/*--- Recording errors                                     ---*/
/*------------------------------------------------------------*/

/* These many bytes below %ESP are considered addressable if we're
   doing the --workaround-gcc296-bugs hack. */
#define VG_GCC296_BUG_STACK_SLOP 1024

/* Is this address within some small distance below %ESP?  Used only
   for the --workaround-gcc296-bugs kludge. */
static Bool is_just_below_ESP( Addr esp, Addr aa )
{
   esp -= VG_STACK_REDZONE_SZB;
   if (esp > aa && (esp - aa) <= VG_GCC296_BUG_STACK_SLOP)
      return True;
   else
      return False;
}

/* --- Called from generated and non-generated code --- */

void MC_(record_address_error) ( ThreadId tid, Addr a, Int szB,
                                 Bool isWrite )
{
   MC_Error extra;
   Bool     just_below_esp;

   if (MC_(in_ignored_range)(a))
      return;

   if (VG_(is_watched)( (isWrite ? write_watchpoint : read_watchpoint), a, szB))
      return;

   just_below_esp = is_just_below_ESP( VG_(get_SP)(tid), a );

   /* If this is caused by an access immediately below %ESP, and the
      user asks nicely, we just ignore it. */
   if (MC_(clo_workaround_gcc296_bugs) && just_below_esp)
      return;

   extra.Err.Addr.isWrite   = isWrite;
   extra.Err.Addr.szB       = szB;
   extra.Err.Addr.maybe_gcc = just_below_esp;
   extra.Err.Addr.ai.tag    = Addr_Undescribed;
   VG_(maybe_record_error)( tid, Err_Addr, a, /*s*/NULL, &extra );
}

void MC_(record_value_error) ( ThreadId tid, Int szB, UInt otag )
{
   MC_Error extra;
   tl_assert( MC_(clo_mc_level) >= 2 );
   if (otag > 0)
      tl_assert( MC_(clo_mc_level) == 3 );
   extra.Err.Value.szB       = szB;
   extra.Err.Value.otag      = otag;
   extra.Err.Value.origin_ec = NULL;  /* Filled in later */
   VG_(maybe_record_error)( tid, Err_Value, /*addr*/0, /*s*/NULL, &extra );
}

void MC_(record_cond_error) ( ThreadId tid, UInt otag )
{
   MC_Error extra;
   tl_assert( MC_(clo_mc_level) >= 2 );
   if (otag > 0)
      tl_assert( MC_(clo_mc_level) == 3 );
   extra.Err.Cond.otag      = otag;
   extra.Err.Cond.origin_ec = NULL;  /* Filled in later */
   VG_(maybe_record_error)( tid, Err_Cond, /*addr*/0, /*s*/NULL, &extra );
}

/* --- Called from non-generated code --- */

/* This is for memory errors in signal-related memory. */
void MC_(record_core_mem_error) ( ThreadId tid, Char* msg )
{
   VG_(maybe_record_error)( tid, Err_CoreMem, /*addr*/0, msg, /*extra*/NULL );
}

void MC_(record_regparam_error) ( ThreadId tid, Char* msg, UInt otag )
{
   MC_Error extra;
   tl_assert(VG_INVALID_THREADID != tid);
   if (otag > 0)
      tl_assert( MC_(clo_mc_level) == 3 );
   extra.Err.RegParam.otag      = otag;
   extra.Err.RegParam.origin_ec = NULL;  /* Filled in later */
   VG_(maybe_record_error)( tid, Err_RegParam, /*addr*/0, msg, &extra );
}

void MC_(record_memparam_error) ( ThreadId tid, Addr a,
                                  Bool isAddrErr, Char* msg, UInt otag )
{
   MC_Error extra;
   tl_assert(VG_INVALID_THREADID != tid);
   if (!isAddrErr)
      tl_assert( MC_(clo_mc_level) >= 2 );
   if (otag != 0) {
      tl_assert( MC_(clo_mc_level) == 3 );
      tl_assert( !isAddrErr );
   }
   extra.Err.MemParam.isAddrErr = isAddrErr;
   extra.Err.MemParam.ai.tag    = Addr_Undescribed;
   extra.Err.MemParam.otag      = otag;
   extra.Err.MemParam.origin_ec = NULL;  /* Filled in later */
   VG_(maybe_record_error)( tid, Err_MemParam, a, msg, &extra );
}

void MC_(record_jump_error) ( ThreadId tid, Addr a )
{
   MC_Error extra;
   tl_assert(VG_INVALID_THREADID != tid);
   extra.Err.Jump.ai.tag = Addr_Undescribed;
   VG_(maybe_record_error)( tid, Err_Jump, a, /*s*/NULL, &extra );
}

void MC_(record_free_error) ( ThreadId tid, Addr a )
{
   MC_Error extra;
   tl_assert(VG_INVALID_THREADID != tid);
   extra.Err.Free.ai.tag = Addr_Undescribed;
   VG_(maybe_record_error)( tid, Err_Free, a, /*s*/NULL, &extra );
}

void MC_(record_freemismatch_error) ( ThreadId tid, MC_Chunk* mc )
{
   MC_Error extra;
   AddrInfo* ai = &extra.Err.FreeMismatch.ai;
   tl_assert(VG_INVALID_THREADID != tid);
   ai->tag = Addr_Block;
   ai->Addr.Block.block_kind = Block_Mallocd;  // Nb: Not 'Block_Freed'
   ai->Addr.Block.block_desc = "block";
   ai->Addr.Block.block_szB  = mc->szB;
   ai->Addr.Block.rwoffset   = 0;
   ai->Addr.Block.lastchange = mc->where;
   VG_(maybe_record_error)( tid, Err_FreeMismatch, mc->data, /*s*/NULL,
                            &extra );
}

void MC_(record_illegal_mempool_error) ( ThreadId tid, Addr a )
{
   MC_Error extra;
   tl_assert(VG_INVALID_THREADID != tid);
   extra.Err.IllegalMempool.ai.tag = Addr_Undescribed;
   VG_(maybe_record_error)( tid, Err_IllegalMempool, a, /*s*/NULL, &extra );
}

void MC_(record_overlap_error) ( ThreadId tid, Char* function,
                                 Addr src, Addr dst, SizeT szB )
{
   MC_Error extra;
   tl_assert(VG_INVALID_THREADID != tid);
   extra.Err.Overlap.src = src;
   extra.Err.Overlap.dst = dst;
   extra.Err.Overlap.szB = szB;
   VG_(maybe_record_error)(
      tid, Err_Overlap, /*addr*/0, /*s*/function, &extra );
}

Bool MC_(record_leak_error) ( ThreadId tid, UInt n_this_record,
                              UInt n_total_records, LossRecord* lr,
                              Bool print_record, Bool count_error )
{
   MC_Error extra;
   extra.Err.Leak.n_this_record   = n_this_record;
   extra.Err.Leak.n_total_records = n_total_records;
   extra.Err.Leak.lr              = lr;
   return
   VG_(unique_error) ( tid, Err_Leak, /*Addr*/0, /*s*/NULL, &extra,
                       lr->key.allocated_at, print_record,
                       /*allow_GDB_attach*/False, count_error );
}

void MC_(record_user_error) ( ThreadId tid, Addr a,
                              Bool isAddrErr, UInt otag )
{
   MC_Error extra;
   if (otag != 0) {
      tl_assert(!isAddrErr);
      tl_assert( MC_(clo_mc_level) == 3 );
   }
   if (!isAddrErr) {
      tl_assert( MC_(clo_mc_level) >= 2 );
   }
   tl_assert(VG_INVALID_THREADID != tid);
   extra.Err.User.isAddrErr = isAddrErr;
   extra.Err.User.ai.tag    = Addr_Undescribed;
   extra.Err.User.otag      = otag;
   extra.Err.User.origin_ec = NULL;  /* Filled in later */
   VG_(maybe_record_error)( tid, Err_User, a, /*s*/NULL, &extra );
}

/*------------------------------------------------------------*/
/*--- Other error operations                               ---*/
/*------------------------------------------------------------*/

/* Compare error contexts, to detect duplicates.  Note that if they
   are otherwise the same, the faulting addrs and associated rwoffsets
   are allowed to be different.  */
Bool MC_(eq_Error) ( VgRes res, Error* e1, Error* e2 )
{
   MC_Error* extra1 = VG_(get_error_extra)(e1);
   MC_Error* extra2 = VG_(get_error_extra)(e2);

   /* Guaranteed by calling function */
   tl_assert(VG_(get_error_kind)(e1) == VG_(get_error_kind)(e2));

   switch (VG_(get_error_kind)(e1)) {
      case Err_CoreMem: {
         Char *e1s, *e2s;
         e1s = VG_(get_error_string)(e1);
         e2s = VG_(get_error_string)(e2);
         if (e1s == e2s)                   return True;
         if (VG_STREQ(e1s, e2s))           return True;
         return False;
      }

      case Err_RegParam:
         return VG_STREQ(VG_(get_error_string)(e1), VG_(get_error_string)(e2));

      // Perhaps we should also check the addrinfo.akinds for equality.
      // That would result in more error reports, but only in cases where
      // a register contains uninitialised bytes and points to memory
      // containing uninitialised bytes.  Currently, the 2nd of those to be
      // detected won't be reported.  That is (nearly?) always the memory
      // error, which is good.
      case Err_MemParam:
         if (!VG_STREQ(VG_(get_error_string)(e1),
                       VG_(get_error_string)(e2))) return False;
         // fall through
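         // Nb: falling through to Err_User works because MemParam and
         // User have identically laid out members in the MC_Error union,
         // so the isAddrErr comparison below reads the same field for
         // either error kind.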
      case Err_User:
         return ( extra1->Err.User.isAddrErr == extra2->Err.User.isAddrErr
                ? True : False );

      case Err_Free:
      case Err_FreeMismatch:
      case Err_Jump:
      case Err_IllegalMempool:
      case Err_Overlap:
      case Err_Cond:
         return True;

      case Err_Addr:
         return ( extra1->Err.Addr.szB == extra2->Err.Addr.szB
                ? True : False );

      case Err_Value:
         return ( extra1->Err.Value.szB == extra2->Err.Value.szB
                ? True : False );

      case Err_Leak:
         VG_(tool_panic)("Shouldn't get Err_Leak in mc_eq_Error,\n"
                         "since it's handled with VG_(unique_error)()!");

      default:
         VG_(printf)("Error:\n  unknown error code %d\n",
                     VG_(get_error_kind)(e1));
         VG_(tool_panic)("unknown error code in mc_eq_Error");
   }
}

/* Functions used when searching MC_Chunk lists */
static
Bool addr_is_in_MC_Chunk_default_REDZONE_SZB(MC_Chunk* mc, Addr a)
{
   return VG_(addr_is_in_block)( a, mc->data, mc->szB,
                                 MC_(Malloc_Redzone_SzB) );
}
static
Bool addr_is_in_MC_Chunk_with_REDZONE_SZB(MC_Chunk* mc, Addr a, SizeT rzB)
{
   return VG_(addr_is_in_block)( a, mc->data, mc->szB,
                                 rzB );
}

// Forward declarations
static Bool client_block_maybe_describe( Addr a, AddrInfo* ai );
static Bool mempool_block_maybe_describe( Addr a, AddrInfo* ai );


/* Describe an address as best you can, for error messages,
   putting the result in ai. */
static void describe_addr ( Addr a, /*OUT*/AddrInfo* ai )
{
   MC_Chunk*  mc;
   ThreadId   tid;
   Addr       stack_min, stack_max;
   VgSectKind sect;

   tl_assert(Addr_Undescribed == ai->tag);

   /* -- Perhaps it's a user-named block? -- */
   if (client_block_maybe_describe( a, ai )) {
      return;
   }
   /* -- Perhaps it's in a mempool block? -- */
   if (mempool_block_maybe_describe( a, ai )) {
      return;
   }
   /* Blocks allocated by memcheck's malloc functions are either
      on the recently-freed list or on the malloc'd list.
      Custom blocks can be on both: a recently freed block might
      have just been re-allocated.
      So, search the malloc'd list first, as the most recent block is
      the probable cause of the error.  We do, however, detect and
      report when such a block is a recently re-allocated one. */
   /* -- Search for a currently malloc'd block which might bracket it. -- */
   VG_(HT_ResetIter)(MC_(malloc_list));
   while ( (mc = VG_(HT_Next)(MC_(malloc_list))) ) {
      if (addr_is_in_MC_Chunk_default_REDZONE_SZB(mc, a)) {
         ai->tag = Addr_Block;
         ai->Addr.Block.block_kind = Block_Mallocd;
         if (MC_(get_freed_block_bracketting)( a ))
            ai->Addr.Block.block_desc = "recently re-allocated block";
         else
            ai->Addr.Block.block_desc = "block";
         ai->Addr.Block.block_szB  = mc->szB;
         ai->Addr.Block.rwoffset   = (Word)a - (Word)mc->data;
         ai->Addr.Block.lastchange = mc->where;
         return;
      }
   }
   /* -- Search for a recently freed block which might bracket it. -- */
   mc = MC_(get_freed_block_bracketting)( a );
   if (mc) {
      ai->tag = Addr_Block;
      ai->Addr.Block.block_kind = Block_Freed;
      ai->Addr.Block.block_desc = "block";
      ai->Addr.Block.block_szB  = mc->szB;
      ai->Addr.Block.rwoffset   = (Word)a - (Word)mc->data;
      ai->Addr.Block.lastchange = mc->where;
      return;
   }
   /* -- Perhaps the variable type/location data describes it? -- */
   ai->Addr.Variable.descr1
      = VG_(newXA)( VG_(malloc), "mc.da.descr1",
                    VG_(free), sizeof(HChar) );
   ai->Addr.Variable.descr2
      = VG_(newXA)( VG_(malloc), "mc.da.descr2",
                    VG_(free), sizeof(HChar) );

   (void) VG_(get_data_description)( ai->Addr.Variable.descr1,
                                     ai->Addr.Variable.descr2, a );
   /* If there's nothing in descr1/2, free them.  Why is it safe to do
      VG_(indexXA) at zero here?  Because VG_(get_data_description)
      guarantees to zero terminate descr1/2 regardless of the outcome
      of the call.  So there's always at least one element in each XA
      after the call.
   */
   if (0 == VG_(strlen)( VG_(indexXA)( ai->Addr.Variable.descr1, 0 ))) {
      VG_(deleteXA)( ai->Addr.Variable.descr1 );
      ai->Addr.Variable.descr1 = NULL;
   }
   if (0 == VG_(strlen)( VG_(indexXA)( ai->Addr.Variable.descr2, 0 ))) {
      VG_(deleteXA)( ai->Addr.Variable.descr2 );
      ai->Addr.Variable.descr2 = NULL;
   }
   /* Assume (assert) that VG_(get_data_description) fills in descr1
      before it fills in descr2 */
   if (ai->Addr.Variable.descr1 == NULL)
      tl_assert(ai->Addr.Variable.descr2 == NULL);
   /* So did we get lucky? */
   if (ai->Addr.Variable.descr1 != NULL) {
      ai->tag = Addr_Variable;
      return;
   }
   /* -- Have a look at the low level data symbols - perhaps it's in
      there. -- */
   VG_(memset)( &ai->Addr.DataSym.name,
                0, sizeof(ai->Addr.DataSym.name));
   if (VG_(get_datasym_and_offset)(
             a, &ai->Addr.DataSym.name[0],
             sizeof(ai->Addr.DataSym.name)-1,
             &ai->Addr.DataSym.offset )) {
      ai->tag = Addr_DataSym;
      tl_assert( ai->Addr.DataSym.name
                    [ sizeof(ai->Addr.DataSym.name)-1 ] == 0);
      return;
   }
   /* -- Perhaps it's on a thread's stack? -- */
   VG_(thread_stack_reset_iter)(&tid);
   while ( VG_(thread_stack_next)(&tid, &stack_min, &stack_max) ) {
      if (stack_min - VG_STACK_REDZONE_SZB <= a && a <= stack_max) {
         ai->tag            = Addr_Stack;
         ai->Addr.Stack.tid = tid;
         return;
      }
   }
   /* -- last ditch attempt at classification -- */
   tl_assert( sizeof(ai->Addr.SectKind.objname) > 4 );
   VG_(memset)( &ai->Addr.SectKind.objname,
                0, sizeof(ai->Addr.SectKind.objname));
   VG_(strcpy)( ai->Addr.SectKind.objname, "???" );
   sect = VG_(DebugInfo_sect_kind)( &ai->Addr.SectKind.objname[0],
                                    sizeof(ai->Addr.SectKind.objname)-1, a);
   if (sect != Vg_SectUnknown) {
      ai->tag = Addr_SectKind;
      ai->Addr.SectKind.kind = sect;
      tl_assert( ai->Addr.SectKind.objname
                    [ sizeof(ai->Addr.SectKind.objname)-1 ] == 0);
      return;
   }
   /* -- Clueless ... -- */
   ai->tag = Addr_Unknown;
   return;
}

void MC_(pp_describe_addr) ( Addr a )
{
   AddrInfo ai;

   ai.tag = Addr_Undescribed;
   describe_addr (a, &ai);
   mc_pp_AddrInfo (a, &ai, /* maybe_gcc */ False);
}

/* Fill in *origin_ec as specified by otag, or NULL it out if otag
   does not refer to a known origin. */
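/* An origin tag (otag) packs an ExeContext unique (ECU) in its upper
   bits and an MC_OKIND_* value in its low 2 bits; hence 'otag & ~3'
   below and 'otag & 3' at the mc_pp_origin call sites. */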
static void update_origin ( /*OUT*/ExeContext** origin_ec,
                            UInt otag )
{
   UInt ecu = otag & ~3;
   *origin_ec = NULL;
   if (VG_(is_plausible_ECU)(ecu)) {
      *origin_ec = VG_(get_ExeContext_from_ECU)( ecu );
   }
}

/* Updates the copy with address info if necessary (but not for all errors). */
UInt MC_(update_Error_extra)( Error* err )
{
   MC_Error* extra = VG_(get_error_extra)(err);

   switch (VG_(get_error_kind)(err)) {
   // These ones don't have addresses associated with them, and so don't
   // need any updating.
   case Err_CoreMem:
   //case Err_Value:
   //case Err_Cond:
   case Err_Overlap:
   // For Err_Leaks the returned size does not matter -- they are always
   // shown with VG_(unique_error)(), so their 'extra' is not copied.  But
   // we make it consistent with the others.
   case Err_Leak:
      return sizeof(MC_Error);

   // For value errors, get the ExeContext corresponding to the
   // origin tag.  Note that it is a kludge to assume that
   // a length-1 trace indicates a stack origin.  FIXME.
   case Err_Value:
      update_origin( &extra->Err.Value.origin_ec,
                     extra->Err.Value.otag );
      return sizeof(MC_Error);
   case Err_Cond:
      update_origin( &extra->Err.Cond.origin_ec,
                     extra->Err.Cond.otag );
      return sizeof(MC_Error);
   case Err_RegParam:
      update_origin( &extra->Err.RegParam.origin_ec,
                     extra->Err.RegParam.otag );
      return sizeof(MC_Error);

   // These ones always involve a memory address.
   case Err_Addr:
      describe_addr ( VG_(get_error_address)(err),
                      &extra->Err.Addr.ai );
      return sizeof(MC_Error);
   case Err_MemParam:
      describe_addr ( VG_(get_error_address)(err),
                      &extra->Err.MemParam.ai );
      update_origin( &extra->Err.MemParam.origin_ec,
                     extra->Err.MemParam.otag );
      return sizeof(MC_Error);
   case Err_Jump:
      describe_addr ( VG_(get_error_address)(err),
                      &extra->Err.Jump.ai );
      return sizeof(MC_Error);
   case Err_User:
      describe_addr ( VG_(get_error_address)(err),
                      &extra->Err.User.ai );
      update_origin( &extra->Err.User.origin_ec,
                     extra->Err.User.otag );
      return sizeof(MC_Error);
   case Err_Free:
      describe_addr ( VG_(get_error_address)(err),
                      &extra->Err.Free.ai );
      return sizeof(MC_Error);
   case Err_IllegalMempool:
      describe_addr ( VG_(get_error_address)(err),
                      &extra->Err.IllegalMempool.ai );
      return sizeof(MC_Error);

   // Err_FreeMismatches have already had their address described;  this is
   // possible because we have the MC_Chunk on hand when the error is
   // detected.  However, the address may be part of a user block, and if so
   // we override the pre-determined description with a user block one.
   case Err_FreeMismatch: {
      tl_assert(extra && Block_Mallocd ==
                extra->Err.FreeMismatch.ai.Addr.Block.block_kind);
      (void)client_block_maybe_describe( VG_(get_error_address)(err),
                                        &extra->Err.FreeMismatch.ai );
      return sizeof(MC_Error);
   }

   default: VG_(tool_panic)("mc_update_extra: bad errkind");
   }
}


static Bool client_block_maybe_describe( Addr a,
                                         /*OUT*/AddrInfo* ai )
{
   UWord      i;
   CGenBlock* cgbs = NULL;
   UWord      cgb_used = 0;

   MC_(get_ClientBlock_array)( &cgbs, &cgb_used );
   if (cgbs == NULL)
      tl_assert(cgb_used == 0);

   /* Perhaps it's a general block ? */
   for (i = 0; i < cgb_used; i++) {
      if (cgbs[i].start == 0 && cgbs[i].size == 0)
         continue;
      // Use zero as the redzone for client blocks.
      if (VG_(addr_is_in_block)(a, cgbs[i].start, cgbs[i].size, 0)) {
         ai->tag = Addr_Block;
         ai->Addr.Block.block_kind = Block_UserG;
         ai->Addr.Block.block_desc = cgbs[i].desc;
         ai->Addr.Block.block_szB  = cgbs[i].size;
         ai->Addr.Block.rwoffset   = (Word)(a) - (Word)(cgbs[i].start);
         ai->Addr.Block.lastchange = cgbs[i].where;
         return True;
      }
   }
   return False;
}


static Bool mempool_block_maybe_describe( Addr a,
                                          /*OUT*/AddrInfo* ai )
{
   MC_Mempool* mp;
   tl_assert( MC_(mempool_list) );

   VG_(HT_ResetIter)( MC_(mempool_list) );
   while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
      if (mp->chunks != NULL) {
         MC_Chunk* mc;
         VG_(HT_ResetIter)(mp->chunks);
         while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
            if (addr_is_in_MC_Chunk_with_REDZONE_SZB(mc, a, mp->rzB)) {
               ai->tag = Addr_Block;
               ai->Addr.Block.block_kind = Block_MempoolChunk;
               ai->Addr.Block.block_desc = "block";
               ai->Addr.Block.block_szB  = mc->szB;
               ai->Addr.Block.rwoffset   = (Word)a - (Word)mc->data;
               ai->Addr.Block.lastchange = mc->where;
               return True;
            }
         }
      }
   }
   return False;
}


/*------------------------------------------------------------*/
/*--- Suppressions                                         ---*/
/*------------------------------------------------------------*/

typedef
   enum {
      ParamSupp,     // Bad syscall params
      UserSupp,      // Errors arising from client-request checks
      CoreMemSupp,   // Memory errors in core (pthread ops, signal handling)

      // Undefined value errors of given size
      Value1Supp, Value2Supp, Value4Supp, Value8Supp, Value16Supp,

      // Undefined value error in conditional.
      CondSupp,

      // Unaddressable read/write attempt at given size
      Addr1Supp, Addr2Supp, Addr4Supp, Addr8Supp, Addr16Supp,

      JumpSupp,      // Jump to unaddressable target
      FreeSupp,      // Invalid or mismatching free
      OverlapSupp,   // Overlapping blocks in memcpy(), strcpy(), etc
      LeakSupp,      // Something to be suppressed in a leak check.
      MempoolSupp,   // Memory pool suppression.
   }
   MC_SuppKind;

Bool MC_(is_recognised_suppression) ( Char* name, Supp* su )
{
   SuppKind skind;

   if      (VG_STREQ(name, "Param"))   skind = ParamSupp;
   else if (VG_STREQ(name, "User"))    skind = UserSupp;
   else if (VG_STREQ(name, "CoreMem")) skind = CoreMemSupp;
   else if (VG_STREQ(name, "Addr1"))   skind = Addr1Supp;
   else if (VG_STREQ(name, "Addr2"))   skind = Addr2Supp;
   else if (VG_STREQ(name, "Addr4"))   skind = Addr4Supp;
   else if (VG_STREQ(name, "Addr8"))   skind = Addr8Supp;
   else if (VG_STREQ(name, "Addr16"))  skind = Addr16Supp;
   else if (VG_STREQ(name, "Jump"))    skind = JumpSupp;
   else if (VG_STREQ(name, "Free"))    skind = FreeSupp;
   else if (VG_STREQ(name, "Leak"))    skind = LeakSupp;
   else if (VG_STREQ(name, "Overlap")) skind = OverlapSupp;
   else if (VG_STREQ(name, "Mempool")) skind = MempoolSupp;
   else if (VG_STREQ(name, "Cond"))    skind = CondSupp;
   else if (VG_STREQ(name, "Value0"))  skind = CondSupp; /* backwards compat */
   else if (VG_STREQ(name, "Value1"))  skind = Value1Supp;
   else if (VG_STREQ(name, "Value2"))  skind = Value2Supp;
   else if (VG_STREQ(name, "Value4"))  skind = Value4Supp;
   else if (VG_STREQ(name, "Value8"))  skind = Value8Supp;
   else if (VG_STREQ(name, "Value16")) skind = Value16Supp;
   else
      return False;

   VG_(set_supp_kind)(su, skind);
   return True;
}

Bool MC_(read_extra_suppression_info) ( Int fd, Char** bufpp,
                                        SizeT* nBufp, Supp *su )
{
   Bool eof;

   if (VG_(get_supp_kind)(su) == ParamSupp) {
      eof = VG_(get_line) ( fd, bufpp, nBufp, NULL );
      if (eof) return False;
      VG_(set_supp_string)(su, VG_(strdup)("mc.resi.1", *bufpp));
   }
   return True;
}

Bool MC_(error_matches_suppression) ( Error* err, Supp* su )
{
   Int       su_szB;
   MC_Error* extra = VG_(get_error_extra)(err);
   ErrorKind ekind = VG_(get_error_kind )(err);

   switch (VG_(get_supp_kind)(su)) {
      case ParamSupp:
         return ((ekind == Err_RegParam || ekind == Err_MemParam)
              && VG_STREQ(VG_(get_error_string)(err),
                          VG_(get_supp_string)(su)));

      case UserSupp:
         return (ekind == Err_User);

      case CoreMemSupp:
         return (ekind == Err_CoreMem
              && VG_STREQ(VG_(get_error_string)(err),
                          VG_(get_supp_string)(su)));

      case Value1Supp: su_szB = 1; goto value_case;
      case Value2Supp: su_szB = 2; goto value_case;
      case Value4Supp: su_szB = 4; goto value_case;
      case Value8Supp: su_szB = 8; goto value_case;
      case Value16Supp:su_szB =16; goto value_case;
      value_case:
         return (ekind == Err_Value && extra->Err.Value.szB == su_szB);

      case CondSupp:
         return (ekind == Err_Cond);

      case Addr1Supp: su_szB = 1; goto addr_case;
      case Addr2Supp: su_szB = 2; goto addr_case;
      case Addr4Supp: su_szB = 4; goto addr_case;
      case Addr8Supp: su_szB = 8; goto addr_case;
      case Addr16Supp:su_szB =16; goto addr_case;
      addr_case:
         return (ekind == Err_Addr && extra->Err.Addr.szB == su_szB);

      case JumpSupp:
         return (ekind == Err_Jump);

      case FreeSupp:
         return (ekind == Err_Free || ekind == Err_FreeMismatch);

      case OverlapSupp:
         return (ekind == Err_Overlap);

      case LeakSupp:
         return (ekind == Err_Leak);

      case MempoolSupp:
         return (ekind == Err_IllegalMempool);

      default:
         VG_(printf)("Error:\n"
                     "  unknown suppression type %d\n",
                     VG_(get_supp_kind)(su));
         VG_(tool_panic)("unknown suppression type in "
                         "MC_(error_matches_suppression)");
   }
}

Char* MC_(get_error_name) ( Error* err )
{
   switch (VG_(get_error_kind)(err)) {
   case Err_RegParam:       return "Param";
   case Err_MemParam:       return "Param";
   case Err_User:           return "User";
   case Err_FreeMismatch:   return "Free";
   case Err_IllegalMempool: return "Mempool";
   case Err_Free:           return "Free";
   case Err_Jump:           return "Jump";
   case Err_CoreMem:        return "CoreMem";
   case Err_Overlap:        return "Overlap";
   case Err_Leak:           return "Leak";
   case Err_Cond:           return "Cond";
   case Err_Addr: {
      MC_Error* extra = VG_(get_error_extra)(err);
      switch ( extra->Err.Addr.szB ) {
      case 1:               return "Addr1";
      case 2:               return "Addr2";
      case 4:               return "Addr4";
      case 8:               return "Addr8";
      case 16:              return "Addr16";
      default:              VG_(tool_panic)("unexpected size for Addr");
      }
   }
   case Err_Value: {
      MC_Error* extra = VG_(get_error_extra)(err);
      switch ( extra->Err.Value.szB ) {
      case 1:               return "Value1";
      case 2:               return "Value2";
      case 4:               return "Value4";
      case 8:               return "Value8";
      case 16:              return "Value16";
      default:              VG_(tool_panic)("unexpected size for Value");
      }
   }
   default:                 VG_(tool_panic)("get_error_name: unexpected type");
   }
}

Bool MC_(get_extra_suppression_info) ( Error* err,
                                       /*OUT*/Char* buf, Int nBuf )
{
   ErrorKind ekind = VG_(get_error_kind )(err);
   tl_assert(buf);
   tl_assert(nBuf >= 16); // stay sane
   if (Err_RegParam == ekind || Err_MemParam == ekind) {
      Char* errstr = VG_(get_error_string)(err);
      tl_assert(errstr);
      VG_(snprintf)(buf, nBuf-1, "%s", errstr);
      return True;
   } else {
      return False;
   }
}


/*--------------------------------------------------------------------*/
/*--- end                                              mc_errors.c ---*/
/*--------------------------------------------------------------------*/
