
/*--------------------------------------------------------------------*/
/*--- Error management for Helgrind.                               ---*/
/*---                                                  hg_errors.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Helgrind, a Valgrind tool for detecting errors
   in threaded programs.

   Copyright (C) 2007-2011 OpenWorks Ltd
      info@open-works.co.uk

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "pub_tool_basics.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_execontext.h"
#include "pub_tool_errormgr.h"
#include "pub_tool_wordfm.h"
#include "pub_tool_xarray.h"
#include "pub_tool_debuginfo.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_options.h"     // VG_(clo_xml)

#include "hg_basics.h"
#include "hg_wordset.h"
#include "hg_lock_n_thread.h"
#include "libhb.h"
#include "hg_errors.h"            /* self */


/*----------------------------------------------------------------*/
/*--- Error management -- storage                              ---*/
/*----------------------------------------------------------------*/

/* maps (by value) strings to a copy of them in ARENA_TOOL */

static WordFM* string_table = NULL;

ULong HG_(stats__string_table_queries) = 0;

ULong HG_(stats__string_table_get_map_size) ( void ) {
   return string_table ? (ULong)VG_(sizeFM)(string_table) : 0;
}

static Word string_table_cmp ( UWord s1, UWord s2 ) {
   return (Word)VG_(strcmp)( (HChar*)s1, (HChar*)s2 );
}

static HChar* string_table_strdup ( HChar* str ) {
   HChar* copy = NULL;
   HG_(stats__string_table_queries)++;
   if (!str)
      str = "(null)";
   if (!string_table) {
      string_table = VG_(newFM)( HG_(zalloc), "hg.sts.1",
                                 HG_(free), string_table_cmp );
      tl_assert(string_table);
   }
   if (VG_(lookupFM)( string_table,
                      NULL, (Word*)&copy, (Word)str )) {
      tl_assert(copy);
      if (0) VG_(printf)("string_table_strdup: %p -> %p\n", str, copy );
      return copy;
   } else {
      copy = HG_(strdup)("hg.sts.2", str);
      tl_assert(copy);
      VG_(addToFM)( string_table, (Word)copy, (Word)copy );
      return copy;
   }
}
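
/* A minimal usage sketch (the literal below is hypothetical): because
   the table maps strings by value, interning equal contents twice
   yields the same ARENA_TOOL copy, so callers may hold the returned
   pointer without worrying about the lifetime of the original.

      HChar* a = string_table_strdup("pthread_mutex_lock");
      HChar* b = string_table_strdup("pthread_mutex_lock");
      tl_assert(a == b);   -- both point at the same interned copy
*/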

/* maps from Lock .unique fields to LockP*s */

static WordFM* map_LockN_to_P = NULL;

ULong HG_(stats__LockN_to_P_queries) = 0;

ULong HG_(stats__LockN_to_P_get_map_size) ( void ) {
   return map_LockN_to_P ? (ULong)VG_(sizeFM)(map_LockN_to_P) : 0;
}

static Word lock_unique_cmp ( UWord lk1W, UWord lk2W )
{
   Lock* lk1 = (Lock*)lk1W;
   Lock* lk2 = (Lock*)lk2W;
   tl_assert( HG_(is_sane_LockNorP)(lk1) );
   tl_assert( HG_(is_sane_LockNorP)(lk2) );
   if (lk1->unique < lk2->unique) return -1;
   if (lk1->unique > lk2->unique) return 1;
   return 0;
}

/* Given a normal Lock (LockN), convert it to a persistent Lock
   (LockP).  In some cases the LockN could be invalid (if it's been
   freed), so we enquire, in hg_main.c's admin_locks list, whether it
   is in fact valid.  If allowed_to_be_invalid is True, then it's OK
   for the LockN to be invalid, in which case Lock_INVALID is
   returned.  In all other cases, we insist that the LockN is a valid
   lock, and return its corresponding LockP.

   Why can LockNs sometimes be invalid?  Because they are harvested
   from locksets that are attached to the OldRef info for conflicting
   threads.  By the time we detect a race, some of the elements of
   the lockset may have been destroyed by the client, in which case
   the corresponding Lock structures we maintain will have been freed.

   So we check that each LockN is a member of the admin_locks double
   linked list of all Lock structures.  That stops us prodding around
   in potentially freed-up Lock structures.  However, it's not quite a
   proper check: if a new Lock has been reallocated at the same
   address as one which was previously freed, we'll wind up copying
   the new one as the basis for the LockP, which is completely bogus
   because it is unrelated to the previous Lock that lived there.
   Let's hope that doesn't happen too often.
*/
static Lock* mk_LockP_from_LockN ( Lock* lkn,
                                   Bool allowed_to_be_invalid )
{
   Lock* lkp = NULL;
   HG_(stats__LockN_to_P_queries)++;

   /* First off, let's do some sanity checks.  If
      allowed_to_be_invalid is False, we _must_ be able to find 'lkn'
      in admin_locks; else we must assert.  If it is True, it's OK for
      it not to be findable, but in that case we must return
      Lock_INVALID right away. */
   Lock* lock_list = HG_(get_admin_locks)();
   while (lock_list) {
      if (lock_list == lkn)
         break;
      lock_list = lock_list->admin_next;
   }
   if (lock_list == NULL) {
      /* We didn't find it.  That possibility has to be OK'd by the
         caller. */
      tl_assert(allowed_to_be_invalid);
      return Lock_INVALID;
   }

   /* So we must be looking at a valid LockN. */
   tl_assert( HG_(is_sane_LockN)(lkn) );

   if (!map_LockN_to_P) {
      map_LockN_to_P = VG_(newFM)( HG_(zalloc), "hg.mLPfLN.1",
                                   HG_(free), lock_unique_cmp );
      tl_assert(map_LockN_to_P);
   }
   if (!VG_(lookupFM)( map_LockN_to_P, NULL, (Word*)&lkp, (Word)lkn)) {
      lkp = HG_(zalloc)( "hg.mLPfLN.2", sizeof(Lock) );
      *lkp = *lkn;
      lkp->admin_next = NULL;
      lkp->admin_prev = NULL;
      lkp->magic = LockP_MAGIC;
      /* Forget about the bag of lock holders - don't copy that.
         Also, acquired_at should be NULL whenever heldBy is, and vice
         versa.  Also forget about the associated libhb synch object. */
      lkp->heldW  = False;
      lkp->heldBy = NULL;
      lkp->acquired_at = NULL;
      lkp->hbso = NULL;
      VG_(addToFM)( map_LockN_to_P, (Word)lkp, (Word)lkp );
   }
   tl_assert( HG_(is_sane_LockP)(lkp) );
   return lkp;
}
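
/* A minimal usage sketch (not part of the tool): how the two modes
   differ for a caller holding a LockN* 'lkn' of uncertain validity.

      Lock* lkp = mk_LockP_from_LockN( lkn, True );
      if (lkp == Lock_INVALID) {
         -- 'lkn' was not in admin_locks; nothing can be said about it
      } else {
         tl_assert( HG_(is_sane_LockP)(lkp) );
      }

   With allowed_to_be_invalid == False the call asserts instead of
   returning Lock_INVALID, so that mode is only safe for locksets
   known to contain live locks. */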

/* Expand a WordSet of LockN*'s into a NULL-terminated vector of
   LockP*'s.  Any LockN's that can't be converted into a LockP
   (because they have been freed, see comment on mk_LockP_from_LockN)
   are converted instead into the value Lock_INVALID.  Hence the
   returned vector is a sequence: zero or more (valid LockP* or
   Lock_INVALID), terminated by a NULL. */
static
Lock** enumerate_WordSet_into_LockP_vector( WordSetU* univ_lsets,
                                            WordSetID lockset,
                                            Bool allowed_to_be_invalid )
{
   tl_assert(univ_lsets);
   tl_assert( HG_(plausibleWS)(univ_lsets, lockset) );
   UWord  nLocks = HG_(cardinalityWS)(univ_lsets, lockset);
   Lock** lockPs = HG_(zalloc)( "hg.eWSiLPa",
                                (nLocks+1) * sizeof(Lock*) );
   tl_assert(lockPs);
   tl_assert(lockPs[nLocks] == NULL); /* pre-NULL terminated */
   UWord* lockNs  = NULL;
   UWord  nLockNs = 0;
   if (nLocks > 0)  {
      /* HG_(getPayloadWS) doesn't assign non-NULL to &lockNs if the
         lockset is empty; hence the guarding "if".  Sigh. */
      HG_(getPayloadWS)( &lockNs, &nLockNs, univ_lsets, lockset );
      tl_assert(lockNs);
   }
   UWord i;
   /* Convert to LockPs. */
   for (i = 0; i < nLockNs; i++) {
      lockPs[i] = mk_LockP_from_LockN( (Lock*)lockNs[i],
                                       allowed_to_be_invalid );
   }
   return lockPs;
}
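
/* A minimal usage sketch (not part of the tool): walking a vector
   produced by enumerate_WordSet_into_LockP_vector.  Since it is
   NULL-terminated, no separate length needs to be carried around.

      Lock** vec = enumerate_WordSet_into_LockP_vector(
                      HG_(get_univ_lsets)(), lockset, True );
      UWord i;
      for (i = 0; vec[i]; i++) {
         if (vec[i] == Lock_INVALID)
            continue;   -- the underlying LockN had been freed
         ... vec[i] is a valid LockP* ...
      }
*/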

/* Get the number of useful elements in a vector created by
   enumerate_WordSet_into_LockP_vector.  Returns both the total number
   of elements (not including the terminating NULL) and the number of
   non-Lock_INVALID elements. */
static void count_LockP_vector ( /*OUT*/UWord* nLocks,
                                 /*OUT*/UWord* nLocksValid,
                                 Lock** vec )
{
   tl_assert(vec);
   *nLocks = *nLocksValid = 0;
   UWord n = 0;
   while (vec[n]) {
      (*nLocks)++;
      if (vec[n] != Lock_INVALID)
         (*nLocksValid)++;
      n++;
   }
}
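
/* Worked example: for the vector { lkp1, Lock_INVALID, lkp2, NULL },
   count_LockP_vector sets *nLocks to 3 and *nLocksValid to 2. */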

/* Find out whether 'lk' is in 'vec'. */
static Bool elem_LockP_vector ( Lock** vec, Lock* lk )
{
   tl_assert(vec);
   tl_assert(lk);
   UWord n = 0;
   while (vec[n]) {
      if (vec[n] == lk)
         return True;
      n++;
   }
   return False;
}


/* Errors:

      race: program counter
            read or write
            data size
            previous state
            current state

      FIXME: how does state printing interact with lockset gc?
      Are the locksets in prev/curr state always valid?
      Ditto question for the threadsets
          ThreadSets - probably are always valid if Threads
          are never thrown away.
          LockSets - could at least print the lockset elements that
          correspond to actual locks at the time of printing.  Hmm.
*/

/* Error kinds */
typedef
   enum {
      XE_Race=1101,      // race
      XE_UnlockUnlocked, // unlocking a not-locked lock
      XE_UnlockForeign,  // unlocking a lock held by some other thread
      XE_UnlockBogus,    // unlocking an address not known to be a lock
      XE_PthAPIerror,    // error from the POSIX pthreads API
      XE_LockOrder,      // lock order error
      XE_Misc            // misc other error (w/ string to describe it)
   }
   XErrorTag;

/* Extra contexts for kinds */
typedef
   struct  {
      XErrorTag tag;
      union {
         struct {
            Addr        data_addr;
            Int         szB;
            Bool        isWrite;
            Thread*     thr;
            Lock**      locksHeldW;
            /* descr1/2 provide a description of stack/global locs */
            XArray*     descr1; /* XArray* of HChar */
            XArray*     descr2; /* XArray* of HChar */
            /* hctxt/haddr/hszB describe the addr if it is a heap block. */
            ExeContext* hctxt;
            Addr        haddr;
            SizeT       hszB;
            /* h1_* and h2_* provide some description of a previously
               observed access with which we are conflicting. */
            Thread*     h1_ct; /* non-NULL means h1 info present */
            ExeContext* h1_ct_mbsegstartEC;
            ExeContext* h1_ct_mbsegendEC;
            Thread*     h2_ct; /* non-NULL means h2 info present */
            ExeContext* h2_ct_accEC;
            Int         h2_ct_accSzB;
            Bool        h2_ct_accIsW;
            Lock**      h2_ct_locksHeldW;
         } Race;
         struct {
            Thread* thr;  /* doing the unlocking */
            Lock*   lock; /* lock (that is already unlocked) */
         } UnlockUnlocked;
         struct {
            Thread* thr;    /* doing the unlocking */
            Thread* owner;  /* thread that actually holds the lock */
            Lock*   lock;   /* lock (that is held by 'owner') */
         } UnlockForeign;
         struct {
            Thread* thr;     /* doing the unlocking */
            Addr    lock_ga; /* purported address of the lock */
         } UnlockBogus;
         struct {
            Thread* thr;
            HChar*  fnname; /* persistent, in tool-arena */
            Word    err;    /* pth error code */
            HChar*  errstr; /* persistent, in tool-arena */
         } PthAPIerror;
         struct {
            Thread*     thr;
            /* The first 4 fields describe the previously observed
               (should-be) ordering. */
            Addr        shouldbe_earlier_ga;
            Addr        shouldbe_later_ga;
            ExeContext* shouldbe_earlier_ec;
            ExeContext* shouldbe_later_ec;
            /* In principle we need to record two more stacks, from
               this thread, when acquiring the locks in the "wrong"
               order.  In fact the wallclock-later acquisition by this
               thread is recorded in the main stack for this error.
               So we only need a stack for the earlier acquisition by
               this thread. */
            ExeContext* actual_earlier_ec;
         } LockOrder;
         struct {
            Thread*     thr;
            HChar*      errstr; /* persistent, in tool-arena */
            HChar*      auxstr; /* optional, persistent, in tool-arena */
            ExeContext* auxctx; /* optional */
         } Misc;
      } XE;
   }
   XError;

static void init_XError ( XError* xe ) {
   VG_(memset)(xe, 0, sizeof(*xe) );
   xe->tag = XE_Race-1; /* bogus */
}


/* Extensions of suppressions */
typedef
   enum {
      XS_Race=1201, /* race */
      XS_FreeMemLock,
      XS_UnlockUnlocked,
      XS_UnlockForeign,
      XS_UnlockBogus,
      XS_PthAPIerror,
      XS_LockOrder,
      XS_Misc
   }
   XSuppTag;


/* Updates the copy with address info if necessary. */
UInt HG_(update_extra) ( Error* err )
{
   XError* xe = (XError*)VG_(get_error_extra)(err);
   tl_assert(xe);
   //if (extra != NULL && Undescribed == extra->addrinfo.akind) {
   //   describe_addr ( VG_(get_error_address)(err), &(extra->addrinfo) );
   //}

   if (xe->tag == XE_Race) {

      /* Note the set of locks that the thread is (w-)holding.
         Convert the WordSetID of LockN*'s into a NULL-terminated
         vector of LockP*'s.  We don't expect to encounter any invalid
         LockNs in this conversion. */
      tl_assert(xe->XE.Race.thr);
      xe->XE.Race.locksHeldW
         = enumerate_WordSet_into_LockP_vector(
              HG_(get_univ_lsets)(),
              xe->XE.Race.thr->locksetW,
              False/*!allowed_to_be_invalid*/
           );

      /* See if we can come up with a source level description of the
         raced-upon address.  This is potentially expensive, which is
         why it's only done at the update_extra point, not when the
         error is initially created. */
      static Int xxx = 0;
      xxx++;
      if (0)
         VG_(printf)("HG_(update_extra): "
                     "%d conflicting-event queries\n", xxx);

      tl_assert(!xe->XE.Race.hctxt);
      tl_assert(!xe->XE.Race.descr1);
      tl_assert(!xe->XE.Race.descr2);

      /* First, see if it's in any heap block.  Unfortunately this
         means a linear search through all allocated heap blocks.  The
         assertion says that if it's detected as a heap block, then we
         must have an allocation context for it, since all heap blocks
         should have an allocation context. */
      Bool is_heapblock
         = HG_(mm_find_containing_block)(
              &xe->XE.Race.hctxt, &xe->XE.Race.haddr, &xe->XE.Race.hszB,
              xe->XE.Race.data_addr
           );
      tl_assert(is_heapblock == (xe->XE.Race.hctxt != NULL));

      if (!xe->XE.Race.hctxt) {
         /* It's not in any heap block.  See if we can map it to a
            stack or global symbol. */

         xe->XE.Race.descr1
            = VG_(newXA)( HG_(zalloc), "hg.update_extra.Race.descr1",
                          HG_(free), sizeof(HChar) );
         xe->XE.Race.descr2
            = VG_(newXA)( HG_(zalloc), "hg.update_extra.Race.descr2",
                          HG_(free), sizeof(HChar) );

         (void) VG_(get_data_description)( xe->XE.Race.descr1,
                                           xe->XE.Race.descr2,
                                           xe->XE.Race.data_addr );

         /* If there's nothing in descr1/2, free it.  Why is it safe
            to VG_(indexXA) at zero here?  Because
            VG_(get_data_description) guarantees to zero terminate
            descr1/2 regardless of the outcome of the call.  So there's
            always at least one element in each XA after the call.
         */
         if (0 == VG_(strlen)( VG_(indexXA)( xe->XE.Race.descr1, 0 ))) {
            VG_(deleteXA)( xe->XE.Race.descr1 );
            xe->XE.Race.descr1 = NULL;
         }
         if (0 == VG_(strlen)( VG_(indexXA)( xe->XE.Race.descr2, 0 ))) {
            VG_(deleteXA)( xe->XE.Race.descr2 );
            xe->XE.Race.descr2 = NULL;
         }
      }

      /* And poke around in the conflicting-event map, to see if we
         can rustle up a plausible-looking conflicting memory access
         to show. */
      if (HG_(clo_history_level) >= 2) {
         Thr*        thrp            = NULL;
         ExeContext* wherep          = NULL;
         Addr        acc_addr        = xe->XE.Race.data_addr;
         Int         acc_szB         = xe->XE.Race.szB;
         Thr*        acc_thr         = xe->XE.Race.thr->hbthr;
         Bool        acc_isW         = xe->XE.Race.isWrite;
         SizeT       conf_szB        = 0;
         Bool        conf_isW        = False;
         WordSetID   conf_locksHeldW = 0;
         tl_assert(!xe->XE.Race.h2_ct_accEC);
         tl_assert(!xe->XE.Race.h2_ct);
         if (libhb_event_map_lookup(
                &wherep, &thrp, &conf_szB, &conf_isW, &conf_locksHeldW,
                acc_thr, acc_addr, acc_szB, acc_isW )) {
            Thread* threadp;
            tl_assert(wherep);
            tl_assert(thrp);
            threadp = libhb_get_Thr_hgthread( thrp );
            tl_assert(threadp);
            xe->XE.Race.h2_ct_accEC  = wherep;
            xe->XE.Race.h2_ct        = threadp;
            xe->XE.Race.h2_ct_accSzB = (Int)conf_szB;
            xe->XE.Race.h2_ct_accIsW = conf_isW;
            xe->XE.Race.h2_ct_locksHeldW
               = enumerate_WordSet_into_LockP_vector(
                    HG_(get_univ_lsets)(),
                    conf_locksHeldW,
                    True/*allowed_to_be_invalid*/
                 );
         }
      }

      // both NULL or both non-NULL
      tl_assert( (!!xe->XE.Race.h2_ct) == (!!xe->XE.Race.h2_ct_accEC) );
   }

   return sizeof(XError);
}

void HG_(record_error_Race) ( Thread* thr,
                              Addr data_addr, Int szB, Bool isWrite,
                              Thread* h1_ct,
                              ExeContext* h1_ct_segstart,
                              ExeContext* h1_ct_mbsegendEC )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );

#  if defined(VGO_linux)
   /* Skip any races on locations apparently in GOTPLT sections.  This
      is said to be caused by ld.so poking PLT table entries (or
      whatever) when it writes the resolved address of a dynamically
      linked routine, into the table (or whatever) when it is called
      for the first time. */
   {
     VgSectKind sect = VG_(DebugInfo_sect_kind)( NULL, 0, data_addr );
     if (0) VG_(printf)("XXXXXXXXX RACE on %#lx %s\n",
                        data_addr, VG_(pp_SectKind)(sect));
     /* SectGOTPLT is required on ???-linux */
     if (sect == Vg_SectGOTPLT) return;
     /* SectPLT is required on ppc32/64-linux */
     if (sect == Vg_SectPLT) return;
   }
#  endif

   init_XError(&xe);
   xe.tag = XE_Race;
   xe.XE.Race.data_addr   = data_addr;
   xe.XE.Race.szB         = szB;
   xe.XE.Race.isWrite     = isWrite;
   xe.XE.Race.thr         = thr;
   tl_assert(isWrite == False || isWrite == True);
   tl_assert(szB == 8 || szB == 4 || szB == 2 || szB == 1);
   /* Skip on the detailed description of the raced-on address at this
      point; it's expensive.  Leave it for the update_extra function
      if we ever make it that far. */
   tl_assert(xe.XE.Race.descr1 == NULL);
   tl_assert(xe.XE.Race.descr2 == NULL);
   // FIXME: tid vs thr
   // Skip on any of the conflicting-access info at this point.
   // It's expensive to obtain, and this error is more likely than
   // not to be discarded.  We'll fill these fields in
   // HG_(update_extra) just above, assuming the error ever makes
   // it that far (unlikely).
   xe.XE.Race.h2_ct_accSzB = 0;
   xe.XE.Race.h2_ct_accIsW = False;
   xe.XE.Race.h2_ct_accEC  = NULL;
   xe.XE.Race.h2_ct        = NULL;
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );

   xe.XE.Race.h1_ct              = h1_ct;
   xe.XE.Race.h1_ct_mbsegstartEC = h1_ct_segstart;
   xe.XE.Race.h1_ct_mbsegendEC   = h1_ct_mbsegendEC;

   VG_(maybe_record_error)( thr->coretid,
                            XE_Race, data_addr, NULL, &xe );
}
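
/* Illustrative call-site sketch (hypothetical values, not part of the
   tool): hg_main.c's race detection would report a 4-byte write to
   guest address 'ga' by Thread* 'thr', with no h1 segment-boundary
   information available, as

      HG_(record_error_Race)( thr, ga, 4, True,  -- isWrite
                              NULL, NULL, NULL );

   The h1_* arguments are only non-NULL when a previously observed
   conflicting segment is known. */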

void HG_(record_error_UnlockUnlocked) ( Thread* thr, Lock* lk )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );
   init_XError(&xe);
   xe.tag = XE_UnlockUnlocked;
   xe.XE.UnlockUnlocked.thr
      = thr;
   xe.XE.UnlockUnlocked.lock
      = mk_LockP_from_LockN(lk, False/*!allowed_to_be_invalid*/);
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_UnlockUnlocked, 0, NULL, &xe );
}

void HG_(record_error_UnlockForeign) ( Thread* thr,
                                       Thread* owner, Lock* lk )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert( HG_(is_sane_Thread)(owner) );
   tl_assert( HG_(is_sane_LockN)(lk) );
   init_XError(&xe);
   xe.tag = XE_UnlockForeign;
   xe.XE.UnlockForeign.thr   = thr;
   xe.XE.UnlockForeign.owner = owner;
   xe.XE.UnlockForeign.lock
      = mk_LockP_from_LockN(lk, False/*!allowed_to_be_invalid*/);
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_UnlockForeign, 0, NULL, &xe );
}

void HG_(record_error_UnlockBogus) ( Thread* thr, Addr lock_ga )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   init_XError(&xe);
   xe.tag = XE_UnlockBogus;
   xe.XE.UnlockBogus.thr     = thr;
   xe.XE.UnlockBogus.lock_ga = lock_ga;
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_UnlockBogus, 0, NULL, &xe );
}

void HG_(record_error_LockOrder)(
        Thread*     thr,
        Addr        shouldbe_earlier_ga,
        Addr        shouldbe_later_ga,
        ExeContext* shouldbe_earlier_ec,
        ExeContext* shouldbe_later_ec,
        ExeContext* actual_earlier_ec
     )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert(HG_(clo_track_lockorders));
   init_XError(&xe);
   xe.tag = XE_LockOrder;
   xe.XE.LockOrder.thr       = thr;
   xe.XE.LockOrder.shouldbe_earlier_ga = shouldbe_earlier_ga;
   xe.XE.LockOrder.shouldbe_earlier_ec = shouldbe_earlier_ec;
   xe.XE.LockOrder.shouldbe_later_ga   = shouldbe_later_ga;
   xe.XE.LockOrder.shouldbe_later_ec   = shouldbe_later_ec;
   xe.XE.LockOrder.actual_earlier_ec   = actual_earlier_ec;
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_LockOrder, 0, NULL, &xe );
}

void HG_(record_error_PthAPIerror) ( Thread* thr, HChar* fnname,
                                     Word err, HChar* errstr )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert(fnname);
   tl_assert(errstr);
   init_XError(&xe);
   xe.tag = XE_PthAPIerror;
   xe.XE.PthAPIerror.thr    = thr;
   xe.XE.PthAPIerror.fnname = string_table_strdup(fnname);
   xe.XE.PthAPIerror.err    = err;
   xe.XE.PthAPIerror.errstr = string_table_strdup(errstr);
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_PthAPIerror, 0, NULL, &xe );
}

void HG_(record_error_Misc_w_aux) ( Thread* thr, HChar* errstr,
                                    HChar* auxstr, ExeContext* auxctx )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert(errstr);
   init_XError(&xe);
   xe.tag = XE_Misc;
   xe.XE.Misc.thr    = thr;
   xe.XE.Misc.errstr = string_table_strdup(errstr);
   xe.XE.Misc.auxstr = auxstr ? string_table_strdup(auxstr) : NULL;
   xe.XE.Misc.auxctx = auxctx;
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_Misc, 0, NULL, &xe );
}

void HG_(record_error_Misc) ( Thread* thr, HChar* errstr )
{
   HG_(record_error_Misc_w_aux)(thr, errstr, NULL, NULL);
}

Bool HG_(eq_Error) ( VgRes not_used, Error* e1, Error* e2 )
{
   XError *xe1, *xe2;

   tl_assert(VG_(get_error_kind)(e1) == VG_(get_error_kind)(e2));

   xe1 = (XError*)VG_(get_error_extra)(e1);
   xe2 = (XError*)VG_(get_error_extra)(e2);
   tl_assert(xe1);
   tl_assert(xe2);

   switch (VG_(get_error_kind)(e1)) {
      case XE_Race:
         return xe1->XE.Race.szB == xe2->XE.Race.szB
                && xe1->XE.Race.isWrite == xe2->XE.Race.isWrite
                && (HG_(clo_cmp_race_err_addrs)
                       ? xe1->XE.Race.data_addr == xe2->XE.Race.data_addr
                       : True);
      case XE_UnlockUnlocked:
         return xe1->XE.UnlockUnlocked.thr == xe2->XE.UnlockUnlocked.thr
                && xe1->XE.UnlockUnlocked.lock == xe2->XE.UnlockUnlocked.lock;
      case XE_UnlockForeign:
         return xe1->XE.UnlockForeign.thr == xe2->XE.UnlockForeign.thr
                && xe1->XE.UnlockForeign.owner == xe2->XE.UnlockForeign.owner
                && xe1->XE.UnlockForeign.lock == xe2->XE.UnlockForeign.lock;
      case XE_UnlockBogus:
         return xe1->XE.UnlockBogus.thr == xe2->XE.UnlockBogus.thr
                && xe1->XE.UnlockBogus.lock_ga == xe2->XE.UnlockBogus.lock_ga;
      case XE_PthAPIerror:
         return xe1->XE.PthAPIerror.thr == xe2->XE.PthAPIerror.thr
                && 0==VG_(strcmp)(xe1->XE.PthAPIerror.fnname,
                                  xe2->XE.PthAPIerror.fnname)
                && xe1->XE.PthAPIerror.err == xe2->XE.PthAPIerror.err;
      case XE_LockOrder:
         return xe1->XE.LockOrder.thr == xe2->XE.LockOrder.thr;
      case XE_Misc:
         return xe1->XE.Misc.thr == xe2->XE.Misc.thr
                && 0==VG_(strcmp)(xe1->XE.Misc.errstr, xe2->XE.Misc.errstr);
      default:
         tl_assert(0);
   }

   /*NOTREACHED*/
   tl_assert(0);
}


/*----------------------------------------------------------------*/
/*--- Error management -- printing                             ---*/
/*----------------------------------------------------------------*/

/* Do a printf-style operation on either the XML or normal output
   channel, depending on the setting of VG_(clo_xml).
*/
static void emit_WRK ( HChar* format, va_list vargs )
{
   if (VG_(clo_xml)) {
      VG_(vprintf_xml)(format, vargs);
   } else {
      VG_(vmessage)(Vg_UserMsg, format, vargs);
   }
}
static void emit ( HChar* format, ... ) PRINTF_CHECK(1, 2);
static void emit ( HChar* format, ... )
{
   va_list vargs;
   va_start(vargs, format);
   emit_WRK(format, vargs);
   va_end(vargs);
}
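
/* A minimal usage sketch: emit() lets the printing code below stay
   output-channel agnostic.  For example, the call

      emit( "  <kind>%s</kind>\n", HG_(get_error_name)(err) );

   in HG_(pp_Error) goes to the XML channel when --xml=yes is in
   force, and to the ordinary user-message channel otherwise.  Code
   that is inherently XML-only or text-only instead tests VG_(clo_xml)
   directly, as the functions below do. */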


/* Announce (that is, print the point-of-creation) of 'thr'.  Only do
   this once, as we only want to see these announcements once per
   thread.  Returned Bool indicates whether or not an announcement was
   made.
*/
static Bool announce_one_thread ( Thread* thr )
{
   tl_assert(HG_(is_sane_Thread)(thr));
   tl_assert(thr->errmsg_index >= 1);
   if (thr->announced)
      return False;

   if (VG_(clo_xml)) {

      VG_(printf_xml)("<announcethread>\n");
      VG_(printf_xml)("  <hthreadid>%d</hthreadid>\n", thr->errmsg_index);
      if (thr->errmsg_index == 1) {
         tl_assert(thr->created_at == NULL);
         VG_(printf_xml)("  <isrootthread></isrootthread>\n");
      } else {
         tl_assert(thr->created_at != NULL);
         VG_(pp_ExeContext)( thr->created_at );
      }
      VG_(printf_xml)("</announcethread>\n\n");

   } else {

      VG_(umsg)("---Thread-Announcement----------"
                "--------------------------------" "\n");
      VG_(umsg)("\n");

      if (thr->errmsg_index == 1) {
         tl_assert(thr->created_at == NULL);
         VG_(message)(Vg_UserMsg,
                      "Thread #%d is the program's root thread\n",
                       thr->errmsg_index);
      } else {
         tl_assert(thr->created_at != NULL);
         VG_(message)(Vg_UserMsg, "Thread #%d was created\n",
                                  thr->errmsg_index);
         VG_(pp_ExeContext)( thr->created_at );
      }
      VG_(message)(Vg_UserMsg, "\n");

   }

   thr->announced = True;
   return True;
}
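
/* For reference, the text-mode announcement above renders as, e.g.
   (the thread number is illustrative; the stack trace comes from
   thr->created_at):

      ---Thread-Announcement------------------------------------------

      Thread #2 was created
         ...

   The root thread (errmsg_index == 1) instead gets "Thread #1 is the
   program's root thread" with no creation context. */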


/* Announce 'lk'. */
static void announce_LockP ( Lock* lk )
{
   tl_assert(lk);
   if (lk == Lock_INVALID)
      return; /* Can't be announced -- we know nothing about it. */
   tl_assert(lk->magic == LockP_MAGIC);
   if (!lk->appeared_at)
     return; /* There's nothing we can show */

   if (VG_(clo_xml)) {
      /* fixme: add announcement */
   } else {
      VG_(umsg)( "Lock at %p was first observed\n",
                 (void*)lk->guestaddr );
      VG_(pp_ExeContext)( lk->appeared_at );
      VG_(umsg)("\n");
   }
}

/* Announce (that is, print point-of-first-observation) for the
   locks in 'lockvec' and, if non-NULL, 'lockvec2'. */
static void announce_combined_LockP_vecs ( Lock** lockvec,
                                           Lock** lockvec2 )
{
   UWord i;
   tl_assert(lockvec);
   for (i = 0; lockvec[i]; i++) {
      announce_LockP(lockvec[i]);
   }
   if (lockvec2) {
      for (i = 0; lockvec2[i]; i++) {
         Lock* lk = lockvec2[i];
         if (!elem_LockP_vector(lockvec, lk))
            announce_LockP(lk);
      }
   }
}


static void show_LockP_summary_textmode ( Lock** locks, HChar* pre )
{
   tl_assert(locks);
   UWord i;
   UWord nLocks = 0, nLocksValid = 0;
   count_LockP_vector(&nLocks, &nLocksValid, locks);
   tl_assert(nLocksValid <= nLocks);

   if (nLocks == 0) {
      VG_(umsg)( "%sLocks held: none", pre );
   } else {
      VG_(umsg)( "%sLocks held: %lu, at address%s ",
                 pre, nLocks, nLocksValid == 1 ? "" : "es" );
   }

   if (nLocks > 0) {
      for (i = 0; i < nLocks; i++) {
         if (locks[i] == Lock_INVALID)
            continue;
         VG_(umsg)( "%p", (void*)locks[i]->guestaddr);
         if (locks[i+1] != NULL)
            VG_(umsg)(" ");
      }
      if (nLocksValid < nLocks)
         VG_(umsg)(" (and %lu that can't be shown)", nLocks - nLocksValid);
   }
   VG_(umsg)("\n");
}
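
/* For reference: given the vector { lkp1, Lock_INVALID, NULL } and an
   empty 'pre', the routine above emits a single line of the form

      Locks held: 2, at address 0x........ (and 1 that can't be shown)

   and simply "Locks held: none" for an empty vector. */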


/* This is the "this error is due to be printed shortly; so have a
   look at it and print any preamble you want" function.  We use it to
   announce any previously un-announced threads in the upcoming error
   message.
*/
void HG_(before_pp_Error) ( Error* err )
{
   XError* xe;
   tl_assert(err);
   xe = (XError*)VG_(get_error_extra)(err);
   tl_assert(xe);

   switch (VG_(get_error_kind)(err)) {
      case XE_Misc:
         announce_one_thread( xe->XE.Misc.thr );
         break;
      case XE_LockOrder:
         announce_one_thread( xe->XE.LockOrder.thr );
         break;
      case XE_PthAPIerror:
         announce_one_thread( xe->XE.PthAPIerror.thr );
         break;
      case XE_UnlockBogus:
         announce_one_thread( xe->XE.UnlockBogus.thr );
         break;
      case XE_UnlockForeign:
         announce_one_thread( xe->XE.UnlockForeign.thr );
         announce_one_thread( xe->XE.UnlockForeign.owner );
         break;
      case XE_UnlockUnlocked:
         announce_one_thread( xe->XE.UnlockUnlocked.thr );
         break;
      case XE_Race:
         announce_one_thread( xe->XE.Race.thr );
         if (xe->XE.Race.h2_ct)
            announce_one_thread( xe->XE.Race.h2_ct );
         if (xe->XE.Race.h1_ct)
            announce_one_thread( xe->XE.Race.h1_ct );
         break;
      default:
         tl_assert(0);
   }
}

void HG_(pp_Error) ( Error* err )
{
   const Bool xml = VG_(clo_xml); /* a shorthand, that's all */

   if (!xml) {
      VG_(umsg)("--------------------------------"
                "--------------------------------" "\n");
      VG_(umsg)("\n");
   }

   XError *xe = (XError*)VG_(get_error_extra)(err);
   tl_assert(xe);

   if (xml)
      emit( "  <kind>%s</kind>\n", HG_(get_error_name)(err));

   switch (VG_(get_error_kind)(err)) {

   case XE_Misc: {
      tl_assert( HG_(is_sane_Thread)( xe->XE.Misc.thr ) );

      if (xml) {

         emit( "  <xwhat>\n" );
         emit( "    <text>Thread #%d: %s</text>\n",
               (Int)xe->XE.Misc.thr->errmsg_index,
               xe->XE.Misc.errstr );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.Misc.thr->errmsg_index );
         emit( "  </xwhat>\n" );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         if (xe->XE.Misc.auxstr) {
            emit("  <auxwhat>%s</auxwhat>\n", xe->XE.Misc.auxstr);
            if (xe->XE.Misc.auxctx)
               VG_(pp_ExeContext)( xe->XE.Misc.auxctx );
         }

      } else {

         emit( "Thread #%d: %s\n",
               (Int)xe->XE.Misc.thr->errmsg_index,
               xe->XE.Misc.errstr );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         if (xe->XE.Misc.auxstr) {
            emit(" %s\n", xe->XE.Misc.auxstr);
            if (xe->XE.Misc.auxctx)
               VG_(pp_ExeContext)( xe->XE.Misc.auxctx );
         }

      }
      break;
   }

   case XE_LockOrder: {
      tl_assert( HG_(is_sane_Thread)( xe->XE.LockOrder.thr ) );

      if (xml) {

         emit( "  <xwhat>\n" );
         emit( "    <text>Thread #%d: lock order \"%p before %p\" "
                    "violated</text>\n",
               (Int)xe->XE.LockOrder.thr->errmsg_index,
               (void*)xe->XE.LockOrder.shouldbe_earlier_ga,
               (void*)xe->XE.LockOrder.shouldbe_later_ga );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.LockOrder.thr->errmsg_index );
         emit( "  </xwhat>\n" );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         if (xe->XE.LockOrder.shouldbe_earlier_ec
             && xe->XE.LockOrder.shouldbe_later_ec) {
            emit( "  <auxwhat>Required order was established by "
                  "acquisition of lock at %p</auxwhat>\n",
                  (void*)xe->XE.LockOrder.shouldbe_earlier_ga );
            VG_(pp_ExeContext)( xe->XE.LockOrder.shouldbe_earlier_ec );
            emit( "  <auxwhat>followed by a later acquisition "
                  "of lock at %p</auxwhat>\n",
                  (void*)xe->XE.LockOrder.shouldbe_later_ga );
            VG_(pp_ExeContext)( xe->XE.LockOrder.shouldbe_later_ec );
         }

      } else {

         emit( "Thread #%d: lock order \"%p before %p\" violated\n",
               (Int)xe->XE.LockOrder.thr->errmsg_index,
               (void*)xe->XE.LockOrder.shouldbe_earlier_ga,
               (void*)xe->XE.LockOrder.shouldbe_later_ga );
         emit( "\n" );
         emit( "Observed (incorrect) order is: "
               "acquisition of lock at %p\n",
               (void*)xe->XE.LockOrder.shouldbe_later_ga);
         if (xe->XE.LockOrder.actual_earlier_ec) {
             VG_(pp_ExeContext)(xe->XE.LockOrder.actual_earlier_ec);
         } else {
            emit("   (stack unavailable)\n");
         }
         emit( "\n" );
         emit(" followed by a later acquisition of lock at %p\n",
              (void*)xe->XE.LockOrder.shouldbe_earlier_ga);
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         if (xe->XE.LockOrder.shouldbe_earlier_ec
             && xe->XE.LockOrder.shouldbe_later_ec) {
            emit("\n");
            emit( "Required order was established by "
                  "acquisition of lock at %p\n",
                  (void*)xe->XE.LockOrder.shouldbe_earlier_ga );
            VG_(pp_ExeContext)( xe->XE.LockOrder.shouldbe_earlier_ec );
            emit( "\n" );
            emit( " followed by a later acquisition of lock at %p\n",
                  (void*)xe->XE.LockOrder.shouldbe_later_ga );
            VG_(pp_ExeContext)( xe->XE.LockOrder.shouldbe_later_ec );
         }

      }

      break;
   }

   case XE_PthAPIerror: {
      tl_assert( HG_(is_sane_Thread)( xe->XE.PthAPIerror.thr ) );

      if (xml) {

         emit( "  <xwhat>\n" );
         emit(
            "    <text>Thread #%d's call to %pS failed</text>\n",
            (Int)xe->XE.PthAPIerror.thr->errmsg_index,
            xe->XE.PthAPIerror.fnname );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.PthAPIerror.thr->errmsg_index );
         emit( "  </xwhat>\n" );
         emit( "  <what>with error code %ld (%s)</what>\n",
               xe->XE.PthAPIerror.err, xe->XE.PthAPIerror.errstr );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );

      } else {

         emit( "Thread #%d's call to %pS failed\n",
                      (Int)xe->XE.PthAPIerror.thr->errmsg_index,
                      xe->XE.PthAPIerror.fnname );
         emit( "   with error code %ld (%s)\n",
               xe->XE.PthAPIerror.err, xe->XE.PthAPIerror.errstr );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );

      }

      break;
   }

   case XE_UnlockBogus: {
      tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockBogus.thr ) );

      if (xml) {

         emit( "  <xwhat>\n" );
         emit( "    <text>Thread #%d unlocked an invalid "
                    "lock at %p</text>\n",
               (Int)xe->XE.UnlockBogus.thr->errmsg_index,
               (void*)xe->XE.UnlockBogus.lock_ga );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.UnlockBogus.thr->errmsg_index );
         emit( "  </xwhat>\n" );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );

      } else {

         emit( "Thread #%d unlocked an invalid lock at %p\n",
               (Int)xe->XE.UnlockBogus.thr->errmsg_index,
               (void*)xe->XE.UnlockBogus.lock_ga );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );

      }

      break;
   }

   case XE_UnlockForeign: {
      tl_assert( HG_(is_sane_LockP)( xe->XE.UnlockForeign.lock ) );
      tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockForeign.owner ) );
      tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockForeign.thr ) );

      if (xml) {

         emit( "  <xwhat>\n" );
         emit( "    <text>Thread #%d unlocked lock at %p "
                    "currently held by thread #%d</text>\n",
               (Int)xe->XE.UnlockForeign.thr->errmsg_index,
               (void*)xe->XE.UnlockForeign.lock->guestaddr,
               (Int)xe->XE.UnlockForeign.owner->errmsg_index );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.UnlockForeign.thr->errmsg_index );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.UnlockForeign.owner->errmsg_index );
         emit( "  </xwhat>\n" );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );

         if (xe->XE.UnlockForeign.lock->appeared_at) {
            emit( "  <auxwhat>Lock at %p was first observed</auxwhat>\n",
                  (void*)xe->XE.UnlockForeign.lock->guestaddr );
            VG_(pp_ExeContext)( xe->XE.UnlockForeign.lock->appeared_at );
         }

      } else {

         emit( "Thread #%d unlocked lock at %p "
               "currently held by thread #%d\n",
               (Int)xe->XE.UnlockForeign.thr->errmsg_index,
               (void*)xe->XE.UnlockForeign.lock->guestaddr,
               (Int)xe->XE.UnlockForeign.owner->errmsg_index );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         if (xe->XE.UnlockForeign.lock->appeared_at) {
            emit( "  Lock at %p was first observed\n",
                  (void*)xe->XE.UnlockForeign.lock->guestaddr );
            VG_(pp_ExeContext)( xe->XE.UnlockForeign.lock->appeared_at );
         }

      }

      break;
   }

   case XE_UnlockUnlocked: {
      tl_assert( HG_(is_sane_LockP)( xe->XE.UnlockUnlocked.lock ) );
      tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockUnlocked.thr ) );

      if (xml) {

         emit( "  <xwhat>\n" );
         emit( "    <text>Thread #%d unlocked a "
                    "not-locked lock at %p</text>\n",
               (Int)xe->XE.UnlockUnlocked.thr->errmsg_index,
               (void*)xe->XE.UnlockUnlocked.lock->guestaddr );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.UnlockUnlocked.thr->errmsg_index );
         emit( "  </xwhat>\n" );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         if (xe->XE.UnlockUnlocked.lock->appeared_at) {
            emit( "  <auxwhat>Lock at %p was first observed</auxwhat>\n",
                  (void*)xe->XE.UnlockUnlocked.lock->guestaddr );
            VG_(pp_ExeContext)( xe->XE.UnlockUnlocked.lock->appeared_at );
         }

      } else {

         emit( "Thread #%d unlocked a not-locked lock at %p\n",
               (Int)xe->XE.UnlockUnlocked.thr->errmsg_index,
               (void*)xe->XE.UnlockUnlocked.lock->guestaddr );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         if (xe->XE.UnlockUnlocked.lock->appeared_at) {
            emit( "  Lock at %p was first observed\n",
                  (void*)xe->XE.UnlockUnlocked.lock->guestaddr );
            VG_(pp_ExeContext)( xe->XE.UnlockUnlocked.lock->appeared_at );
         }

      }

      break;
   }

   case XE_Race: {
      Addr      err_ga;
      HChar*    what;
      Int       szB;
      what      = xe->XE.Race.isWrite ? "write" : "read";
      szB       = xe->XE.Race.szB;
      err_ga = VG_(get_error_address)(err);

      tl_assert( HG_(is_sane_Thread)( xe->XE.Race.thr ));
      if (xe->XE.Race.h2_ct)
         tl_assert( HG_(is_sane_Thread)( xe->XE.Race.h2_ct ));

      if (xml) {

         /* ------ XML ------ */
         emit( "  <xwhat>\n" );
         emit( "    <text>Possible data race during %s of size %d "
                    "at %p by thread #%d</text>\n",
               what, szB, (void*)err_ga, (Int)xe->XE.Race.thr->errmsg_index );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.Race.thr->errmsg_index );
         emit( "  </xwhat>\n" );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );

         if (xe->XE.Race.h2_ct) {
            tl_assert(xe->XE.Race.h2_ct_accEC); // assured by update_extra
            emit( "  <xauxwhat>\n");
            emit( "    <text>This conflicts with a previous %s of size %d "
                            "by thread #%d</text>\n",
                  xe->XE.Race.h2_ct_accIsW ? "write" : "read",
                  xe->XE.Race.h2_ct_accSzB,
                  xe->XE.Race.h2_ct->errmsg_index );
            emit( "    <hthreadid>%d</hthreadid>\n",
                  xe->XE.Race.h2_ct->errmsg_index);
            emit("  </xauxwhat>\n");
            VG_(pp_ExeContext)( xe->XE.Race.h2_ct_accEC );
         }

         if (xe->XE.Race.h1_ct) {
            emit( "  <xauxwhat>\n");
            emit( "    <text>This conflicts with a previous access "
                  "by thread #%d, after</text>\n",
                  xe->XE.Race.h1_ct->errmsg_index );
            emit( "    <hthreadid>%d</hthreadid>\n",
                  xe->XE.Race.h1_ct->errmsg_index );
            emit("  </xauxwhat>\n");
            if (xe->XE.Race.h1_ct_mbsegstartEC) {
               VG_(pp_ExeContext)( xe->XE.Race.h1_ct_mbsegstartEC );
            } else {
               emit( "  <auxwhat>(the start of the thread)</auxwhat>\n" );
            }
            emit( "  <auxwhat>but before</auxwhat>\n" );
            if (xe->XE.Race.h1_ct_mbsegendEC) {
               VG_(pp_ExeContext)( xe->XE.Race.h1_ct_mbsegendEC );
            } else {
               emit( "  <auxwhat>(the end of the thread)</auxwhat>\n" );
            }
         }

      } else {

         /* ------ Text ------ */
         announce_combined_LockP_vecs( xe->XE.Race.locksHeldW,
                                       xe->XE.Race.h2_ct_locksHeldW );

         emit( "Possible data race during %s of size %d "
               "at %p by thread #%d\n",
               what, szB, (void*)err_ga, (Int)xe->XE.Race.thr->errmsg_index );

         tl_assert(xe->XE.Race.locksHeldW);
         show_LockP_summary_textmode( xe->XE.Race.locksHeldW, "" );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );

         if (xe->XE.Race.h2_ct) {
            tl_assert(xe->XE.Race.h2_ct_accEC); // assured by update_extra
            tl_assert(xe->XE.Race.h2_ct_locksHeldW);
            emit( "\n" );
            emit( "This conflicts with a previous %s of size %d "
                  "by thread #%d\n",
                  xe->XE.Race.h2_ct_accIsW ? "write" : "read",
                  xe->XE.Race.h2_ct_accSzB,
                  xe->XE.Race.h2_ct->errmsg_index );
            show_LockP_summary_textmode( xe->XE.Race.h2_ct_locksHeldW, "" );
            VG_(pp_ExeContext)( xe->XE.Race.h2_ct_accEC );
         }

         if (xe->XE.Race.h1_ct) {
            emit( " This conflicts with a previous access by thread #%d, "
                  "after\n",
                  xe->XE.Race.h1_ct->errmsg_index );
            if (xe->XE.Race.h1_ct_mbsegstartEC) {
               VG_(pp_ExeContext)( xe->XE.Race.h1_ct_mbsegstartEC );
            } else {
               emit( "   (the start of the thread)\n" );
            }
            emit( " but before\n" );
            if (xe->XE.Race.h1_ct_mbsegendEC) {
               VG_(pp_ExeContext)( xe->XE.Race.h1_ct_mbsegendEC );
            } else {
               emit( "   (the end of the thread)\n" );
            }
         }

      }

      /* If we have a description of the address in terms of a heap
         block, show it. */
      if (xe->XE.Race.hctxt) {
         SizeT delta = err_ga - xe->XE.Race.haddr;
         if (xml) {
            emit("  <auxwhat>Address %p is %ld bytes inside a block "
                 "of size %ld alloc'd</auxwhat>\n", (void*)err_ga, delta,
                 xe->XE.Race.hszB);
            VG_(pp_ExeContext)( xe->XE.Race.hctxt );
         } else {
            emit("\n");
            emit("Address %p is %ld bytes inside a block "
                 "of size %ld alloc'd\n", (void*)err_ga, delta,
                 xe->XE.Race.hszB);
            VG_(pp_ExeContext)( xe->XE.Race.hctxt );
         }
      }

      /* If we have a better description of the address, show it.
         Note that in XML mode, it will already be nicely wrapped up
         in tags, either <auxwhat> or <xauxwhat>, so we can just emit
         it verbatim. */
      if (xml) {
         if (xe->XE.Race.descr1)
            emit( "  %s\n",
                  (HChar*)VG_(indexXA)( xe->XE.Race.descr1, 0 ) );
         if (xe->XE.Race.descr2)
            emit( "  %s\n",
                  (HChar*)VG_(indexXA)( xe->XE.Race.descr2, 0 ) );
      } else {
         if (xe->XE.Race.descr1 || xe->XE.Race.descr2)
            emit("\n");
         if (xe->XE.Race.descr1)
            emit( "%s\n",
                  (HChar*)VG_(indexXA)( xe->XE.Race.descr1, 0 ) );
         if (xe->XE.Race.descr2)
            emit( "%s\n",
                  (HChar*)VG_(indexXA)( xe->XE.Race.descr2, 0 ) );
      }

      break; /* case XE_Race */
   } /* case XE_Race */

   default:
      tl_assert(0);
   } /* switch (VG_(get_error_kind)(err)) */
}

Char* HG_(get_error_name) ( Error* err )
{
   switch (VG_(get_error_kind)(err)) {
      case XE_Race:           return "Race";
      case XE_UnlockUnlocked: return "UnlockUnlocked";
      case XE_UnlockForeign:  return "UnlockForeign";
      case XE_UnlockBogus:    return "UnlockBogus";
      case XE_PthAPIerror:    return "PthAPIerror";
      case XE_LockOrder:      return "LockOrder";
      case XE_Misc:           return "Misc";
      default: tl_assert(0); /* fill in missing case */
   }
}

Bool HG_(recognised_suppression) ( Char* name, Supp *su )
{
#  define TRY(_name,_xskind)                   \
      if (0 == VG_(strcmp)(name, (_name))) {   \
         VG_(set_supp_kind)(su, (_xskind));    \
         return True;                          \
      }
   TRY("Race",           XS_Race);
   TRY("FreeMemLock",    XS_FreeMemLock);
   TRY("UnlockUnlocked", XS_UnlockUnlocked);
   TRY("UnlockForeign",  XS_UnlockForeign);
   TRY("UnlockBogus",    XS_UnlockBogus);
   TRY("PthAPIerror",    XS_PthAPIerror);
   TRY("LockOrder",      XS_LockOrder);
   TRY("Misc",           XS_Misc);
   return False;
#  undef TRY
}
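
/* For reference, a suppression entry matched by the names above looks
   like this in a .supp file (the entry name and frame specifier are
   hypothetical):

      {
         my-benign-race
         Helgrind:Race
         fun:some_racy_function
      }
*/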

Bool HG_(read_extra_suppression_info) ( Int fd, Char** bufpp, SizeT* nBufp,
                                        Supp* su )
{
   /* do nothing -- no extra suppression info present.  Return True to
      indicate nothing bad happened. */
   return True;
}

Bool HG_(error_matches_suppression) ( Error* err, Supp* su )
{
   switch (VG_(get_supp_kind)(su)) {
   case XS_Race:           return VG_(get_error_kind)(err) == XE_Race;
   case XS_UnlockUnlocked: return VG_(get_error_kind)(err) == XE_UnlockUnlocked;
   case XS_UnlockForeign:  return VG_(get_error_kind)(err) == XE_UnlockForeign;
   case XS_UnlockBogus:    return VG_(get_error_kind)(err) == XE_UnlockBogus;
   case XS_PthAPIerror:    return VG_(get_error_kind)(err) == XE_PthAPIerror;
   case XS_LockOrder:      return VG_(get_error_kind)(err) == XE_LockOrder;
   case XS_Misc:           return VG_(get_error_kind)(err) == XE_Misc;
   //case XS_: return VG_(get_error_kind)(err) == XE_;
   default: tl_assert(0); /* fill in missing cases */
   }
}

Bool HG_(get_extra_suppression_info) ( Error* err,
                                       /*OUT*/Char* buf, Int nBuf )
{
   /* Do nothing */
   return False;
}


/*--------------------------------------------------------------------*/
/*--- end                                              hg_errors.c ---*/
/*--------------------------------------------------------------------*/