
/*--------------------------------------------------------------------*/
/*--- Error management for Helgrind.                               ---*/
/*---                                                  hg_errors.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Helgrind, a Valgrind tool for detecting errors
   in threaded programs.

   Copyright (C) 2007-2013 OpenWorks Ltd
      info@open-works.co.uk

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "pub_tool_basics.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_execontext.h"
#include "pub_tool_errormgr.h"
#include "pub_tool_wordfm.h"
#include "pub_tool_xarray.h"
#include "pub_tool_debuginfo.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_options.h"     // VG_(clo_xml)
#include "pub_tool_addrinfo.h"

#include "hg_basics.h"
#include "hg_addrdescr.h"
#include "hg_wordset.h"
#include "hg_lock_n_thread.h"
#include "libhb.h"
#include "hg_errors.h"            /* self */


/*----------------------------------------------------------------*/
/*--- Error management -- storage                              ---*/
/*----------------------------------------------------------------*/

/* maps (by value) strings to a copy of them in ARENA_TOOL */

static WordFM* string_table = NULL;

ULong HG_(stats__string_table_queries) = 0;

ULong HG_(stats__string_table_get_map_size) ( void ) {
   return string_table ? (ULong)VG_(sizeFM)(string_table) : 0;
}

static Word string_table_cmp ( UWord s1, UWord s2 ) {
   return (Word)VG_(strcmp)( (HChar*)s1, (HChar*)s2 );
}

static HChar* string_table_strdup ( const HChar* str ) {
   HChar* copy = NULL;
   HG_(stats__string_table_queries)++;
   if (!str)
      str = "(null)";
   if (!string_table) {
      string_table = VG_(newFM)( HG_(zalloc), "hg.sts.1",
                                 HG_(free), string_table_cmp );
      tl_assert(string_table);
   }
   if (VG_(lookupFM)( string_table,
                      NULL, (UWord*)&copy, (UWord)str )) {
      tl_assert(copy);
      if (0) VG_(printf)("string_table_strdup: %p -> %p\n", str, copy );
      return copy;
   } else {
      copy = HG_(strdup)("hg.sts.2", str);
      tl_assert(copy);
      VG_(addToFM)( string_table, (UWord)copy, (UWord)copy );
      return copy;
   }
}
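
/* Editorial note: an illustrative property of the interning above
   (not part of the original code) -- two lookups with equal contents
   return the same ARENA_TOOL copy, so interned strings may later be
   compared by pointer rather than with VG_(strcmp):

      HChar* a = string_table_strdup("pthread_mutex_lock");
      HChar* b = string_table_strdup("pthread_mutex_lock");
      tl_assert(a == b);   // same copy both times
*/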

/* maps from Lock .unique fields to LockP*s */

static WordFM* map_LockN_to_P = NULL;

ULong HG_(stats__LockN_to_P_queries) = 0;

ULong HG_(stats__LockN_to_P_get_map_size) ( void ) {
   return map_LockN_to_P ? (ULong)VG_(sizeFM)(map_LockN_to_P) : 0;
}

static Word lock_unique_cmp ( UWord lk1W, UWord lk2W )
{
   Lock* lk1 = (Lock*)lk1W;
   Lock* lk2 = (Lock*)lk2W;
   tl_assert( HG_(is_sane_LockNorP)(lk1) );
   tl_assert( HG_(is_sane_LockNorP)(lk2) );
   if (lk1->unique < lk2->unique) return -1;
   if (lk1->unique > lk2->unique) return 1;
   return 0;
}

/* Given a normal Lock (LockN), convert it to a persistent Lock
   (LockP).  In some cases the LockN could be invalid (if it's been
   freed), so we enquire, in hg_main.c's admin_locks list, whether it
   is in fact valid.  If allowed_to_be_invalid is True, then it's OK
   for the LockN to be invalid, in which case Lock_INVALID is
   returned.  In all other cases, we insist that the LockN is a valid
   lock, and return its corresponding LockP.

   Why can LockNs sometimes be invalid?  Because they are harvested
   from locksets that are attached to the OldRef info for conflicting
   threads.  By the time we detect a race, some of the elements of the
   lockset may have been destroyed by the client, in which case the
   corresponding Lock structures we maintain will have been freed.

   So we check that each LockN is a member of the admin_locks
   doubly-linked list of all Lock structures.  That stops us prodding
   around in potentially freed-up Lock structures.  However, it's not
   quite a proper check: if a new Lock has been reallocated at the same
   address as one which was previously freed, we'll wind up copying the
   new one as the basis for the LockP, which is completely bogus
   because it is unrelated to the previous Lock that lived there.
   Let's hope that doesn't happen too often.
*/
static Lock* mk_LockP_from_LockN ( Lock* lkn,
                                   Bool allowed_to_be_invalid )
{
   Lock* lkp = NULL;
   HG_(stats__LockN_to_P_queries)++;

   /* First off, let's do some sanity checks.  If
      allowed_to_be_invalid is False, we _must_ be able to find 'lkn'
      in admin_locks; else we must assert.  If it is True, it's OK for
      it not to be findable, but in that case we must return
      Lock_INVALID right away. */
   Lock* lock_list = HG_(get_admin_locks)();
   while (lock_list) {
      if (lock_list == lkn)
         break;
      lock_list = lock_list->admin_next;
   }
   if (lock_list == NULL) {
      /* We didn't find it.  That possibility has to be OK'd by the
         caller. */
      tl_assert(allowed_to_be_invalid);
      return Lock_INVALID;
   }

   /* So we must be looking at a valid LockN. */
   tl_assert( HG_(is_sane_LockN)(lkn) );

   if (!map_LockN_to_P) {
      map_LockN_to_P = VG_(newFM)( HG_(zalloc), "hg.mLPfLN.1",
                                   HG_(free), lock_unique_cmp );
      tl_assert(map_LockN_to_P);
   }
   if (!VG_(lookupFM)( map_LockN_to_P, NULL, (UWord*)&lkp, (UWord)lkn)) {
      lkp = HG_(zalloc)( "hg.mLPfLN.2", sizeof(Lock) );
      *lkp = *lkn;
      lkp->admin_next = NULL;
      lkp->admin_prev = NULL;
      lkp->magic = LockP_MAGIC;
      /* Forget about the bag of lock holders - don't copy that.
         Also, acquired_at should be NULL whenever heldBy is, and vice
         versa.  Also forget about the associated libhb synch object. */
      lkp->heldW  = False;
      lkp->heldBy = NULL;
      lkp->acquired_at = NULL;
      lkp->hbso = NULL;
      VG_(addToFM)( map_LockN_to_P, (UWord)lkp, (UWord)lkp );
   }
   tl_assert( HG_(is_sane_LockP)(lkp) );
   return lkp;
}
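
/* Editorial sketch (assumed usage, not original code): error records
   must outlive the client's locks, which is why they store LockPs
   rather than LockNs.

      Lock* lkp = mk_LockP_from_LockN(lkn, False/*must be valid*/);
      // 'lkp' is a frozen copy of 'lkn' at conversion time: safe to
      // keep in an XError even after the client destroys the mutex
      // underlying 'lkn'.
*/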

/* Expand a WordSet of LockN*'s into a NULL-terminated vector of
   LockP*'s.  Any LockN's that can't be converted into a LockP
   (because they have been freed, see comment on mk_LockP_from_LockN)
   are converted instead into the value Lock_INVALID.  Hence the
   returned vector is a sequence: zero or more (valid LockP* or
   Lock_INVALID), terminated by a NULL. */
static
Lock** enumerate_WordSet_into_LockP_vector( WordSetU* univ_lsets,
                                            WordSetID lockset,
                                            Bool allowed_to_be_invalid )
{
   tl_assert(univ_lsets);
   tl_assert( HG_(plausibleWS)(univ_lsets, lockset) );
   UWord  nLocks = HG_(cardinalityWS)(univ_lsets, lockset);
   Lock** lockPs = HG_(zalloc)( "hg.eWSiLPa",
                                (nLocks+1) * sizeof(Lock*) );
   tl_assert(lockPs);
   tl_assert(lockPs[nLocks] == NULL); /* pre-NULL terminated */
   UWord* lockNs  = NULL;
   UWord  nLockNs = 0;
   if (nLocks > 0)  {
      /* HG_(getPayloadWS) doesn't assign non-NULL to &lockNs if the
         lockset is empty; hence the guarding "if".  Sigh. */
      HG_(getPayloadWS)( &lockNs, &nLockNs, univ_lsets, lockset );
      tl_assert(lockNs);
   }
   UWord i;
   /* Convert to LockPs. */
   for (i = 0; i < nLockNs; i++) {
      lockPs[i] = mk_LockP_from_LockN( (Lock*)lockNs[i],
                                       allowed_to_be_invalid );
   }
   return lockPs;
}
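
/* Editorial sketch of how a caller can consume the result (the names
   'uls' and 'ws' are hypothetical): the NULL terminator means no
   separate length needs to be carried around.

      Lock** vec = enumerate_WordSet_into_LockP_vector(uls, ws, True);
      for (UWord i = 0; vec[i]; i++) {
         if (vec[i] != Lock_INVALID) {
            // use vec[i], e.g. announce_LockP(vec[i])
         }
      }
*/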

/* Get the number of useful elements in a vector created by
   enumerate_WordSet_into_LockP_vector.  Returns both the total number
   of elements (not including the terminating NULL) and the number of
   non-Lock_INVALID elements. */
static void count_LockP_vector ( /*OUT*/UWord* nLocks,
                                 /*OUT*/UWord* nLocksValid,
                                 Lock** vec )
{
   tl_assert(vec);
   *nLocks = *nLocksValid = 0;
   UWord n = 0;
   while (vec[n]) {
      (*nLocks)++;
      if (vec[n] != Lock_INVALID)
         (*nLocksValid)++;
      n++;
   }
}
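
/* Worked example (editorial, with assumed locks lkA and lkB): for the
   vector { lkA, Lock_INVALID, lkB, NULL } this sets *nLocks = 3 and
   *nLocksValid = 2; the terminating NULL is not counted. */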

/* Find out whether 'lk' is in 'vec'. */
static Bool elem_LockP_vector ( Lock** vec, Lock* lk )
{
   tl_assert(vec);
   tl_assert(lk);
   UWord n = 0;
   while (vec[n]) {
      if (vec[n] == lk)
         return True;
      n++;
   }
   return False;
}


/* Errors:

      race: program counter
            read or write
            data size
            previous state
            current state

      FIXME: how does state printing interact with lockset gc?
      Are the locksets in prev/curr state always valid?
      Ditto question for the threadsets
          ThreadSets - probably are always valid if Threads
          are never thrown away.
          LockSets - could at least print the lockset elements that
          correspond to actual locks at the time of printing.  Hmm.
*/

/* Error kinds */
typedef
   enum {
      XE_Race=1101,      // race
      XE_UnlockUnlocked, // unlocking a not-locked lock
      XE_UnlockForeign,  // unlocking a lock held by some other thread
      XE_UnlockBogus,    // unlocking an address not known to be a lock
      XE_PthAPIerror,    // error from the POSIX pthreads API
      XE_LockOrder,      // lock order error
      XE_Misc            // misc other error (w/ string to describe it)
   }
   XErrorTag;

/* Extra contexts for kinds */
typedef
   struct {
      XErrorTag tag;
      union {
         struct {
            Addr        data_addr;
            Int         szB;
            AddrInfo    data_addrinfo;
            Bool        isWrite;
            Thread*     thr;
            Lock**      locksHeldW;
            /* h1_* and h2_* provide some description of a previously
               observed access with which we are conflicting. */
            Thread*     h1_ct; /* non-NULL means h1 info present */
            ExeContext* h1_ct_mbsegstartEC;
            ExeContext* h1_ct_mbsegendEC;
            Thread*     h2_ct; /* non-NULL means h2 info present */
            ExeContext* h2_ct_accEC;
            Int         h2_ct_accSzB;
            Bool        h2_ct_accIsW;
            Lock**      h2_ct_locksHeldW;
         } Race;
         struct {
            Thread* thr;  /* doing the unlocking */
            Lock*   lock; /* lock (that is already unlocked) */
         } UnlockUnlocked;
         struct {
            Thread* thr;    /* doing the unlocking */
            Thread* owner;  /* thread that actually holds the lock */
            Lock*   lock;   /* lock (that is held by 'owner') */
         } UnlockForeign;
         struct {
            Thread* thr;     /* doing the unlocking */
            Addr    lock_ga; /* purported address of the lock */
         } UnlockBogus;
         struct {
            Thread* thr;
            HChar*  fnname; /* persistent, in tool-arena */
            Word    err;    /* pth error code */
            HChar*  errstr; /* persistent, in tool-arena */
         } PthAPIerror;
         struct {
            Thread*     thr;
            /* The first 4 fields describe the previously observed
               (should-be) ordering. */
            Addr        shouldbe_earlier_ga;
            Addr        shouldbe_later_ga;
            ExeContext* shouldbe_earlier_ec;
            ExeContext* shouldbe_later_ec;
            /* In principle we need to record two more stacks, from
               this thread, when acquiring the locks in the "wrong"
               order.  In fact the wallclock-later acquisition by this
               thread is recorded in the main stack for this error.
               So we only need a stack for the earlier acquisition by
               this thread. */
            ExeContext* actual_earlier_ec;
         } LockOrder;
         struct {
            Thread*     thr;
            HChar*      errstr; /* persistent, in tool-arena */
            HChar*      auxstr; /* optional, persistent, in tool-arena */
            ExeContext* auxctx; /* optional */
         } Misc;
      } XE;
   }
   XError;

static void init_XError ( XError* xe ) {
   VG_(memset)(xe, 0, sizeof(*xe) );
   xe->tag = XE_Race-1; /* bogus */
}


/* Extensions of suppressions */
typedef
   enum {
      XS_Race=1201, /* race */
      XS_FreeMemLock,
      XS_UnlockUnlocked,
      XS_UnlockForeign,
      XS_UnlockBogus,
      XS_PthAPIerror,
      XS_LockOrder,
      XS_Misc
   }
   XSuppTag;


/* Updates the copy with address info if necessary. */
UInt HG_(update_extra) ( Error* err )
{
   XError* xe = (XError*)VG_(get_error_extra)(err);
   tl_assert(xe);
   //if (extra != NULL && Undescribed == extra->addrinfo.akind) {
   //   describe_addr ( VG_(get_error_address)(err), &(extra->addrinfo) );
   //}

   if (xe->tag == XE_Race) {

      /* Note the set of locks that the thread is (w-)holding.
         Convert the WordSetID of LockN*'s into a NULL-terminated
         vector of LockP*'s.  We don't expect to encounter any invalid
         LockNs in this conversion. */
      tl_assert(xe->XE.Race.thr);
      xe->XE.Race.locksHeldW
         = enumerate_WordSet_into_LockP_vector(
              HG_(get_univ_lsets)(),
              xe->XE.Race.thr->locksetW,
              False/*!allowed_to_be_invalid*/
           );

      /* See if we can come up with a source level description of the
         raced-upon address.  This is potentially expensive, which is
         why it's only done at the update_extra point, not when the
         error is initially created. */
      static Int xxx = 0;
      xxx++;
      if (0)
         VG_(printf)("HG_(update_extra): "
                     "%d conflicting-event queries\n", xxx);

      HG_(describe_addr) (xe->XE.Race.data_addr, &xe->XE.Race.data_addrinfo);

      /* And poke around in the conflicting-event map, to see if we
         can rustle up a plausible-looking conflicting memory access
         to show. */
      if (HG_(clo_history_level) >= 2) {
         Thr*        thrp            = NULL;
         ExeContext* wherep          = NULL;
         Addr        acc_addr        = xe->XE.Race.data_addr;
         Int         acc_szB         = xe->XE.Race.szB;
         Thr*        acc_thr         = xe->XE.Race.thr->hbthr;
         Bool        acc_isW         = xe->XE.Race.isWrite;
         SizeT       conf_szB        = 0;
         Bool        conf_isW        = False;
         WordSetID   conf_locksHeldW = 0;
         tl_assert(!xe->XE.Race.h2_ct_accEC);
         tl_assert(!xe->XE.Race.h2_ct);
         if (libhb_event_map_lookup(
                &wherep, &thrp, &conf_szB, &conf_isW, &conf_locksHeldW,
                acc_thr, acc_addr, acc_szB, acc_isW )) {
            Thread* threadp;
            tl_assert(wherep);
            tl_assert(thrp);
            threadp = libhb_get_Thr_hgthread( thrp );
            tl_assert(threadp);
            xe->XE.Race.h2_ct_accEC  = wherep;
            xe->XE.Race.h2_ct        = threadp;
            xe->XE.Race.h2_ct_accSzB = (Int)conf_szB;
            xe->XE.Race.h2_ct_accIsW = conf_isW;
            xe->XE.Race.h2_ct_locksHeldW
               = enumerate_WordSet_into_LockP_vector(
                    HG_(get_univ_lsets)(),
                    conf_locksHeldW,
                    True/*allowed_to_be_invalid*/
                 );
         }
      }

      // both NULL or both non-NULL
      tl_assert( (!!xe->XE.Race.h2_ct) == (!!xe->XE.Race.h2_ct_accEC) );
   }

   return sizeof(XError);
}
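
/* Editorial note: as we understand the core error manager, it calls
   HG_(update_extra) once, when an error has been judged novel and is
   about to be stored permanently; the returned size tells it how many
   bytes of 'extra' to copy.  That is why the expensive address
   description and conflicting-access lookup above are deferred to
   this point instead of being done in HG_(record_error_Race). */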

void HG_(record_error_Race) ( Thread* thr,
                              Addr data_addr, Int szB, Bool isWrite,
                              Thread* h1_ct,
                              ExeContext* h1_ct_segstart,
                              ExeContext* h1_ct_mbsegendEC )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );

#  if defined(VGO_linux)
   /* Skip any races on locations apparently in GOTPLT sections.  This
      is said to be caused by ld.so poking PLT table entries (or
      whatever) when it writes the resolved address of a dynamically
      linked routine into the table (or whatever) when the routine is
      called for the first time. */
   {
     VgSectKind sect = VG_(DebugInfo_sect_kind)( NULL, 0, data_addr );
     if (0) VG_(printf)("XXXXXXXXX RACE on %#lx %s\n",
                        data_addr, VG_(pp_SectKind)(sect));
     /* SectGOTPLT is required on ???-linux */
     if (sect == Vg_SectGOTPLT) return;
     /* SectPLT is required on ppc32/64-linux */
     if (sect == Vg_SectPLT) return;
   }
#  endif

   init_XError(&xe);
   xe.tag = XE_Race;
   xe.XE.Race.data_addr   = data_addr;
   xe.XE.Race.szB         = szB;
   xe.XE.Race.isWrite     = isWrite;
   xe.XE.Race.thr         = thr;
   tl_assert(isWrite == False || isWrite == True);
   tl_assert(szB == 8 || szB == 4 || szB == 2 || szB == 1);
   /* Skip the detailed description of the raced-on address at this
      point; it's expensive.  Leave it for the update_extra function
      if we ever make it that far. */
   xe.XE.Race.data_addrinfo.tag = Addr_Undescribed;
   // FIXME: tid vs thr
   // Skip the conflicting-access info at this point too.  It's
   // expensive to obtain, and this error is more likely than not
   // to be discarded.  We'll fill these fields in, in
   // HG_(update_extra) just above, assuming the error ever makes
   // it that far (unlikely).
   xe.XE.Race.h2_ct_accSzB = 0;
   xe.XE.Race.h2_ct_accIsW = False;
   xe.XE.Race.h2_ct_accEC  = NULL;
   xe.XE.Race.h2_ct        = NULL;
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );

   xe.XE.Race.h1_ct              = h1_ct;
   xe.XE.Race.h1_ct_mbsegstartEC = h1_ct_segstart;
   xe.XE.Race.h1_ct_mbsegendEC   = h1_ct_mbsegendEC;

   VG_(maybe_record_error)( thr->coretid,
                            XE_Race, data_addr, NULL, &xe );
}

void HG_(record_error_UnlockUnlocked) ( Thread* thr, Lock* lk )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );
   init_XError(&xe);
   xe.tag = XE_UnlockUnlocked;
   xe.XE.UnlockUnlocked.thr
      = thr;
   xe.XE.UnlockUnlocked.lock
      = mk_LockP_from_LockN(lk, False/*!allowed_to_be_invalid*/);
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_UnlockUnlocked, 0, NULL, &xe );
}

void HG_(record_error_UnlockForeign) ( Thread* thr,
                                       Thread* owner, Lock* lk )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert( HG_(is_sane_Thread)(owner) );
   tl_assert( HG_(is_sane_LockN)(lk) );
   init_XError(&xe);
   xe.tag = XE_UnlockForeign;
   xe.XE.UnlockForeign.thr   = thr;
   xe.XE.UnlockForeign.owner = owner;
   xe.XE.UnlockForeign.lock
      = mk_LockP_from_LockN(lk, False/*!allowed_to_be_invalid*/);
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_UnlockForeign, 0, NULL, &xe );
}

void HG_(record_error_UnlockBogus) ( Thread* thr, Addr lock_ga )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   init_XError(&xe);
   xe.tag = XE_UnlockBogus;
   xe.XE.UnlockBogus.thr     = thr;
   xe.XE.UnlockBogus.lock_ga = lock_ga;
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_UnlockBogus, 0, NULL, &xe );
}

void HG_(record_error_LockOrder)(
        Thread*     thr,
        Addr        shouldbe_earlier_ga,
        Addr        shouldbe_later_ga,
        ExeContext* shouldbe_earlier_ec,
        ExeContext* shouldbe_later_ec,
        ExeContext* actual_earlier_ec
     )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert(HG_(clo_track_lockorders));
   init_XError(&xe);
   xe.tag = XE_LockOrder;
   xe.XE.LockOrder.thr       = thr;
   xe.XE.LockOrder.shouldbe_earlier_ga = shouldbe_earlier_ga;
   xe.XE.LockOrder.shouldbe_earlier_ec = shouldbe_earlier_ec;
   xe.XE.LockOrder.shouldbe_later_ga   = shouldbe_later_ga;
   xe.XE.LockOrder.shouldbe_later_ec   = shouldbe_later_ec;
   xe.XE.LockOrder.actual_earlier_ec   = actual_earlier_ec;
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_LockOrder, 0, NULL, &xe );
}

void HG_(record_error_PthAPIerror) ( Thread* thr, const HChar* fnname,
                                     Word err, const HChar* errstr )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert(fnname);
   tl_assert(errstr);
   init_XError(&xe);
   xe.tag = XE_PthAPIerror;
   xe.XE.PthAPIerror.thr    = thr;
   xe.XE.PthAPIerror.fnname = string_table_strdup(fnname);
   xe.XE.PthAPIerror.err    = err;
   xe.XE.PthAPIerror.errstr = string_table_strdup(errstr);
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_PthAPIerror, 0, NULL, &xe );
}

void HG_(record_error_Misc_w_aux) ( Thread* thr, const HChar* errstr,
                                    const HChar* auxstr, ExeContext* auxctx )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert(errstr);
   init_XError(&xe);
   xe.tag = XE_Misc;
   xe.XE.Misc.thr    = thr;
   xe.XE.Misc.errstr = string_table_strdup(errstr);
   xe.XE.Misc.auxstr = auxstr ? string_table_strdup(auxstr) : NULL;
   xe.XE.Misc.auxctx = auxctx;
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_Misc, 0, NULL, &xe );
}

void HG_(record_error_Misc) ( Thread* thr, const HChar* errstr )
{
   HG_(record_error_Misc_w_aux)(thr, errstr, NULL, NULL);
}

Bool HG_(eq_Error) ( VgRes not_used, Error* e1, Error* e2 )
{
   XError *xe1, *xe2;

   tl_assert(VG_(get_error_kind)(e1) == VG_(get_error_kind)(e2));

   xe1 = (XError*)VG_(get_error_extra)(e1);
   xe2 = (XError*)VG_(get_error_extra)(e2);
   tl_assert(xe1);
   tl_assert(xe2);

   switch (VG_(get_error_kind)(e1)) {
      case XE_Race:
         return xe1->XE.Race.szB == xe2->XE.Race.szB
                && xe1->XE.Race.isWrite == xe2->XE.Race.isWrite
                && (HG_(clo_cmp_race_err_addrs)
                       ? xe1->XE.Race.data_addr == xe2->XE.Race.data_addr
                       : True);
      case XE_UnlockUnlocked:
         return xe1->XE.UnlockUnlocked.thr == xe2->XE.UnlockUnlocked.thr
                && xe1->XE.UnlockUnlocked.lock == xe2->XE.UnlockUnlocked.lock;
      case XE_UnlockForeign:
         return xe1->XE.UnlockForeign.thr == xe2->XE.UnlockForeign.thr
                && xe1->XE.UnlockForeign.owner == xe2->XE.UnlockForeign.owner
                && xe1->XE.UnlockForeign.lock == xe2->XE.UnlockForeign.lock;
      case XE_UnlockBogus:
         return xe1->XE.UnlockBogus.thr == xe2->XE.UnlockBogus.thr
                && xe1->XE.UnlockBogus.lock_ga == xe2->XE.UnlockBogus.lock_ga;
      case XE_PthAPIerror:
         return xe1->XE.PthAPIerror.thr == xe2->XE.PthAPIerror.thr
                && 0==VG_(strcmp)(xe1->XE.PthAPIerror.fnname,
                                  xe2->XE.PthAPIerror.fnname)
                && xe1->XE.PthAPIerror.err == xe2->XE.PthAPIerror.err;
      case XE_LockOrder:
         return xe1->XE.LockOrder.thr == xe2->XE.LockOrder.thr;
      case XE_Misc:
         return xe1->XE.Misc.thr == xe2->XE.Misc.thr
                && 0==VG_(strcmp)(xe1->XE.Misc.errstr, xe2->XE.Misc.errstr);
      default:
         tl_assert(0);
   }

   /*NOTREACHED*/
   tl_assert(0);
}


/*----------------------------------------------------------------*/
/*--- Error management -- printing                             ---*/
/*----------------------------------------------------------------*/

/* Do a printf-style operation on either the XML or normal output
   channel, depending on the setting of VG_(clo_xml).
*/
static void emit_WRK ( const HChar* format, va_list vargs )
{
   if (VG_(clo_xml)) {
      VG_(vprintf_xml)(format, vargs);
   } else {
      VG_(vmessage)(Vg_UserMsg, format, vargs);
   }
}
static void emit ( const HChar* format, ... ) PRINTF_CHECK(1, 2);
static void emit ( const HChar* format, ... )
{
   va_list vargs;
   va_start(vargs, format);
   emit_WRK(format, vargs);
   va_end(vargs);
}
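
/* Editorial note: emit() only selects the output channel; call sites
   below still test VG_(clo_xml) themselves to pick XML-flavoured or
   plain wording.  A sketch (with hypothetical 'index'/'errstr'):

      if (xml)
         emit( "  <kind>%s</kind>\n", HG_(get_error_name)(err) );
      else
         emit( "Thread #%d: %s\n", index, errstr );
*/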


/* Announce (that is, print the point-of-creation) of 'thr'.  Only do
   this once, as we only want to see these announcements once per
   thread.  Returned Bool indicates whether or not an announcement was
   made.
*/
static Bool announce_one_thread ( Thread* thr )
{
   tl_assert(HG_(is_sane_Thread)(thr));
   tl_assert(thr->errmsg_index >= 1);
   if (thr->announced)
      return False;

   if (VG_(clo_xml)) {

      VG_(printf_xml)("<announcethread>\n");
      VG_(printf_xml)("  <hthreadid>%d</hthreadid>\n", thr->errmsg_index);
      if (thr->errmsg_index == 1) {
         tl_assert(thr->created_at == NULL);
         VG_(printf_xml)("  <isrootthread></isrootthread>\n");
      } else {
         tl_assert(thr->created_at != NULL);
         VG_(pp_ExeContext)( thr->created_at );
      }
      VG_(printf_xml)("</announcethread>\n\n");

   } else {

      VG_(umsg)("---Thread-Announcement----------"
                "--------------------------------" "\n");
      VG_(umsg)("\n");

      if (thr->errmsg_index == 1) {
         tl_assert(thr->created_at == NULL);
         VG_(message)(Vg_UserMsg,
                      "Thread #%d is the program's root thread\n",
                       thr->errmsg_index);
      } else {
         tl_assert(thr->created_at != NULL);
         VG_(message)(Vg_UserMsg, "Thread #%d was created\n",
                                  thr->errmsg_index);
         VG_(pp_ExeContext)( thr->created_at );
      }
      VG_(message)(Vg_UserMsg, "\n");

   }

   thr->announced = True;
   return True;
}

/* Announce 'lk'. */
static void announce_LockP ( Lock* lk )
{
   tl_assert(lk);
   if (lk == Lock_INVALID)
      return; /* Can't be announced -- we know nothing about it. */
   tl_assert(lk->magic == LockP_MAGIC);
   if (!lk->appeared_at)
      return; /* There's nothing we can show */

   if (VG_(clo_xml)) {
      /* fixme: add announcement */
   } else {
      VG_(umsg)( "Lock at %p was first observed\n",
                 (void*)lk->guestaddr );
      VG_(pp_ExeContext)( lk->appeared_at );
      VG_(umsg)("\n");
   }
}

/* Announce (that is, print point-of-first-observation) for the
   locks in 'lockvec' and, if non-NULL, 'lockvec2'. */
static void announce_combined_LockP_vecs ( Lock** lockvec,
                                           Lock** lockvec2 )
{
   UWord i;
   tl_assert(lockvec);
   for (i = 0; lockvec[i]; i++) {
      announce_LockP(lockvec[i]);
   }
   if (lockvec2) {
      for (i = 0; lockvec2[i]; i++) {
         Lock* lk = lockvec2[i];
         if (!elem_LockP_vector(lockvec, lk))
            announce_LockP(lk);
      }
   }
}


static void show_LockP_summary_textmode ( Lock** locks, const HChar* pre )
{
   tl_assert(locks);
   UWord i;
   UWord nLocks = 0, nLocksValid = 0;
   count_LockP_vector(&nLocks, &nLocksValid, locks);
   tl_assert(nLocksValid <= nLocks);

   if (nLocks == 0) {
      VG_(umsg)( "%sLocks held: none", pre );
   } else {
      VG_(umsg)( "%sLocks held: %lu, at address%s ",
                 pre, nLocks, nLocksValid == 1 ? "" : "es" );
   }

   if (nLocks > 0) {
      for (i = 0; i < nLocks; i++) {
         if (locks[i] == Lock_INVALID)
            continue;
         VG_(umsg)( "%p", (void*)locks[i]->guestaddr);
         if (locks[i+1] != NULL)
            VG_(umsg)(" ");
      }
      if (nLocksValid < nLocks)
         VG_(umsg)(" (and %lu that can't be shown)", nLocks - nLocksValid);
   }
   VG_(umsg)("\n");
}
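
/* Editorial illustration of the resulting text (addresses are made
   up); the last form arises when some elements are Lock_INVALID:

      Locks held: none
      Locks held: 2, at addresses 0x5B4C5C0 0x5B4C650
      Locks held: 3, at address 0x5B4C5C0 (and 2 that can't be shown)
*/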


/* This is the "this error is due to be printed shortly; so have a
   look at it and print any preamble you want" function.  We use it to
   announce any previously un-announced threads in the upcoming error
   message.
*/
void HG_(before_pp_Error) ( Error* err )
{
   XError* xe;
   tl_assert(err);
   xe = (XError*)VG_(get_error_extra)(err);
   tl_assert(xe);

   switch (VG_(get_error_kind)(err)) {
      case XE_Misc:
         announce_one_thread( xe->XE.Misc.thr );
         break;
      case XE_LockOrder:
         announce_one_thread( xe->XE.LockOrder.thr );
         break;
      case XE_PthAPIerror:
         announce_one_thread( xe->XE.PthAPIerror.thr );
         break;
      case XE_UnlockBogus:
         announce_one_thread( xe->XE.UnlockBogus.thr );
         break;
      case XE_UnlockForeign:
         announce_one_thread( xe->XE.UnlockForeign.thr );
         announce_one_thread( xe->XE.UnlockForeign.owner );
         break;
      case XE_UnlockUnlocked:
         announce_one_thread( xe->XE.UnlockUnlocked.thr );
         break;
      case XE_Race:
         announce_one_thread( xe->XE.Race.thr );
         if (xe->XE.Race.h2_ct)
            announce_one_thread( xe->XE.Race.h2_ct );
         if (xe->XE.Race.h1_ct)
            announce_one_thread( xe->XE.Race.h1_ct );
         break;
      default:
         tl_assert(0);
   }
}

void HG_(pp_Error) ( Error* err )
{
   const Bool xml = VG_(clo_xml); /* a shorthand, that's all */

   if (!xml) {
      VG_(umsg)("--------------------------------"
                "--------------------------------" "\n");
      VG_(umsg)("\n");
   }

   XError *xe = (XError*)VG_(get_error_extra)(err);
   tl_assert(xe);

   if (xml)
      emit( "  <kind>%s</kind>\n", HG_(get_error_name)(err));

   switch (VG_(get_error_kind)(err)) {

   case XE_Misc: {
      tl_assert( HG_(is_sane_Thread)( xe->XE.Misc.thr ) );

      if (xml) {

         emit( "  <xwhat>\n" );
         emit( "    <text>Thread #%d: %s</text>\n",
               (Int)xe->XE.Misc.thr->errmsg_index,
               xe->XE.Misc.errstr );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.Misc.thr->errmsg_index );
         emit( "  </xwhat>\n" );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         if (xe->XE.Misc.auxstr) {
            emit("  <auxwhat>%s</auxwhat>\n", xe->XE.Misc.auxstr);
            if (xe->XE.Misc.auxctx)
               VG_(pp_ExeContext)( xe->XE.Misc.auxctx );
         }

      } else {

         emit( "Thread #%d: %s\n",
               (Int)xe->XE.Misc.thr->errmsg_index,
               xe->XE.Misc.errstr );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         if (xe->XE.Misc.auxstr) {
            emit(" %s\n", xe->XE.Misc.auxstr);
            if (xe->XE.Misc.auxctx)
               VG_(pp_ExeContext)( xe->XE.Misc.auxctx );
         }

      }
      break;
   }

   case XE_LockOrder: {
      tl_assert( HG_(is_sane_Thread)( xe->XE.LockOrder.thr ) );

      if (xml) {

         emit( "  <xwhat>\n" );
         emit( "    <text>Thread #%d: lock order \"%p before %p\" "
                    "violated</text>\n",
               (Int)xe->XE.LockOrder.thr->errmsg_index,
               (void*)xe->XE.LockOrder.shouldbe_earlier_ga,
               (void*)xe->XE.LockOrder.shouldbe_later_ga );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.LockOrder.thr->errmsg_index );
         emit( "  </xwhat>\n" );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         if (xe->XE.LockOrder.shouldbe_earlier_ec
             && xe->XE.LockOrder.shouldbe_later_ec) {
            emit( "  <auxwhat>Required order was established by "
                  "acquisition of lock at %p</auxwhat>\n",
                  (void*)xe->XE.LockOrder.shouldbe_earlier_ga );
            VG_(pp_ExeContext)( xe->XE.LockOrder.shouldbe_earlier_ec );
            emit( "  <auxwhat>followed by a later acquisition "
                  "of lock at %p</auxwhat>\n",
                  (void*)xe->XE.LockOrder.shouldbe_later_ga );
            VG_(pp_ExeContext)( xe->XE.LockOrder.shouldbe_later_ec );
         }

      } else {

         emit( "Thread #%d: lock order \"%p before %p\" violated\n",
               (Int)xe->XE.LockOrder.thr->errmsg_index,
               (void*)xe->XE.LockOrder.shouldbe_earlier_ga,
               (void*)xe->XE.LockOrder.shouldbe_later_ga );
         emit( "\n" );
         emit( "Observed (incorrect) order is: "
               "acquisition of lock at %p\n",
               (void*)xe->XE.LockOrder.shouldbe_later_ga);
         if (xe->XE.LockOrder.actual_earlier_ec) {
            VG_(pp_ExeContext)(xe->XE.LockOrder.actual_earlier_ec);
         } else {
            emit("   (stack unavailable)\n");
         }
         emit( "\n" );
         emit(" followed by a later acquisition of lock at %p\n",
              (void*)xe->XE.LockOrder.shouldbe_earlier_ga);
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         if (xe->XE.LockOrder.shouldbe_earlier_ec
             && xe->XE.LockOrder.shouldbe_later_ec) {
            emit("\n");
            emit( "Required order was established by "
                  "acquisition of lock at %p\n",
                  (void*)xe->XE.LockOrder.shouldbe_earlier_ga );
            VG_(pp_ExeContext)( xe->XE.LockOrder.shouldbe_earlier_ec );
            emit( "\n" );
            emit( " followed by a later acquisition of lock at %p\n",
                  (void*)xe->XE.LockOrder.shouldbe_later_ga );
            VG_(pp_ExeContext)( xe->XE.LockOrder.shouldbe_later_ec );
         }

      }

      break;
   }

   case XE_PthAPIerror: {
      tl_assert( HG_(is_sane_Thread)( xe->XE.PthAPIerror.thr ) );

      if (xml) {

         emit( "  <xwhat>\n" );
         emit(
            "    <text>Thread #%d's call to %pS failed</text>\n",
            (Int)xe->XE.PthAPIerror.thr->errmsg_index,
            xe->XE.PthAPIerror.fnname );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.PthAPIerror.thr->errmsg_index );
         emit( "  </xwhat>\n" );
         emit( "  <what>with error code %ld (%s)</what>\n",
               xe->XE.PthAPIerror.err, xe->XE.PthAPIerror.errstr );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );

      } else {

         emit( "Thread #%d's call to %pS failed\n",
               (Int)xe->XE.PthAPIerror.thr->errmsg_index,
               xe->XE.PthAPIerror.fnname );
         emit( "   with error code %ld (%s)\n",
               xe->XE.PthAPIerror.err, xe->XE.PthAPIerror.errstr );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );

      }

      break;
   }

   case XE_UnlockBogus: {
      tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockBogus.thr ) );

      if (xml) {

         emit( "  <xwhat>\n" );
         emit( "    <text>Thread #%d unlocked an invalid "
                    "lock at %p</text>\n",
               (Int)xe->XE.UnlockBogus.thr->errmsg_index,
               (void*)xe->XE.UnlockBogus.lock_ga );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.UnlockBogus.thr->errmsg_index );
         emit( "  </xwhat>\n" );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );

      } else {

         emit( "Thread #%d unlocked an invalid lock at %p\n",
               (Int)xe->XE.UnlockBogus.thr->errmsg_index,
               (void*)xe->XE.UnlockBogus.lock_ga );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );

      }

      break;
   }

   case XE_UnlockForeign: {
      tl_assert( HG_(is_sane_LockP)( xe->XE.UnlockForeign.lock ) );
      tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockForeign.owner ) );
      tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockForeign.thr ) );

      if (xml) {

         emit( "  <xwhat>\n" );
         emit( "    <text>Thread #%d unlocked lock at %p "
                    "currently held by thread #%d</text>\n",
               (Int)xe->XE.UnlockForeign.thr->errmsg_index,
               (void*)xe->XE.UnlockForeign.lock->guestaddr,
               (Int)xe->XE.UnlockForeign.owner->errmsg_index );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.UnlockForeign.thr->errmsg_index );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.UnlockForeign.owner->errmsg_index );
         emit( "  </xwhat>\n" );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );

         if (xe->XE.UnlockForeign.lock->appeared_at) {
            emit( "  <auxwhat>Lock at %p was first observed</auxwhat>\n",
                  (void*)xe->XE.UnlockForeign.lock->guestaddr );
            VG_(pp_ExeContext)( xe->XE.UnlockForeign.lock->appeared_at );
         }

      } else {

         emit( "Thread #%d unlocked lock at %p "
               "currently held by thread #%d\n",
               (Int)xe->XE.UnlockForeign.thr->errmsg_index,
               (void*)xe->XE.UnlockForeign.lock->guestaddr,
               (Int)xe->XE.UnlockForeign.owner->errmsg_index );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         if (xe->XE.UnlockForeign.lock->appeared_at) {
            emit( "  Lock at %p was first observed\n",
                  (void*)xe->XE.UnlockForeign.lock->guestaddr );
            VG_(pp_ExeContext)( xe->XE.UnlockForeign.lock->appeared_at );
         }

      }

      break;
   }

   case XE_UnlockUnlocked: {
      tl_assert( HG_(is_sane_LockP)( xe->XE.UnlockUnlocked.lock ) );
      tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockUnlocked.thr ) );

      if (xml) {

         emit( "  <xwhat>\n" );
         emit( "    <text>Thread #%d unlocked a "
                    "not-locked lock at %p</text>\n",
               (Int)xe->XE.UnlockUnlocked.thr->errmsg_index,
               (void*)xe->XE.UnlockUnlocked.lock->guestaddr );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.UnlockUnlocked.thr->errmsg_index );
         emit( "  </xwhat>\n" );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         if (xe->XE.UnlockUnlocked.lock->appeared_at) {
            emit( "  <auxwhat>Lock at %p was first observed</auxwhat>\n",
                  (void*)xe->XE.UnlockUnlocked.lock->guestaddr );
            VG_(pp_ExeContext)( xe->XE.UnlockUnlocked.lock->appeared_at );
         }

      } else {

         emit( "Thread #%d unlocked a not-locked lock at %p\n",
               (Int)xe->XE.UnlockUnlocked.thr->errmsg_index,
               (void*)xe->XE.UnlockUnlocked.lock->guestaddr );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         if (xe->XE.UnlockUnlocked.lock->appeared_at) {
            emit( "  Lock at %p was first observed\n",
                  (void*)xe->XE.UnlockUnlocked.lock->guestaddr );
            VG_(pp_ExeContext)( xe->XE.UnlockUnlocked.lock->appeared_at );
         }

      }

      break;
   }

   case XE_Race: {
      Addr         err_ga;
      const HChar* what;
      Int          szB;
      what   = xe->XE.Race.isWrite ? "write" : "read";
      szB    = xe->XE.Race.szB;
      err_ga = VG_(get_error_address)(err);

      tl_assert( HG_(is_sane_Thread)( xe->XE.Race.thr ));
      if (xe->XE.Race.h2_ct)
         tl_assert( HG_(is_sane_Thread)( xe->XE.Race.h2_ct ));

      if (xml) {

         /* ------ XML ------ */
         emit( "  <xwhat>\n" );
         emit( "    <text>Possible data race during %s of size %d "
                    "at %p by thread #%d</text>\n",
               what, szB, (void*)err_ga, (Int)xe->XE.Race.thr->errmsg_index );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.Race.thr->errmsg_index );
         emit( "  </xwhat>\n" );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );

         if (xe->XE.Race.h2_ct) {
            tl_assert(xe->XE.Race.h2_ct_accEC); // assured by update_extra
            emit( "  <xauxwhat>\n");
            emit( "    <text>This conflicts with a previous %s of size %d "
                            "by thread #%d</text>\n",
                  xe->XE.Race.h2_ct_accIsW ? "write" : "read",
                  xe->XE.Race.h2_ct_accSzB,
                  xe->XE.Race.h2_ct->errmsg_index );
            emit( "    <hthreadid>%d</hthreadid>\n",
                  xe->XE.Race.h2_ct->errmsg_index);
            emit("  </xauxwhat>\n");
            VG_(pp_ExeContext)( xe->XE.Race.h2_ct_accEC );
         }

         if (xe->XE.Race.h1_ct) {
            emit( "  <xauxwhat>\n");
            emit( "    <text>This conflicts with a previous access "
                  "by thread #%d, after</text>\n",
                  xe->XE.Race.h1_ct->errmsg_index );
            emit( "    <hthreadid>%d</hthreadid>\n",
                  xe->XE.Race.h1_ct->errmsg_index );
            emit("  </xauxwhat>\n");
            if (xe->XE.Race.h1_ct_mbsegstartEC) {
               VG_(pp_ExeContext)( xe->XE.Race.h1_ct_mbsegstartEC );
            } else {
               emit( "  <auxwhat>(the start of the thread)</auxwhat>\n" );
            }
            emit( "  <auxwhat>but before</auxwhat>\n" );
            if (xe->XE.Race.h1_ct_mbsegendEC) {
               VG_(pp_ExeContext)( xe->XE.Race.h1_ct_mbsegendEC );
            } else {
               emit( "  <auxwhat>(the end of the thread)</auxwhat>\n" );
            }
         }

      } else {

         /* ------ Text ------ */
         announce_combined_LockP_vecs( xe->XE.Race.locksHeldW,
                                       xe->XE.Race.h2_ct_locksHeldW );

         emit( "Possible data race during %s of size %d "
               "at %p by thread #%d\n",
               what, szB, (void*)err_ga, (Int)xe->XE.Race.thr->errmsg_index );

         tl_assert(xe->XE.Race.locksHeldW);
         show_LockP_summary_textmode( xe->XE.Race.locksHeldW, "" );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );

         if (xe->XE.Race.h2_ct) {
            tl_assert(xe->XE.Race.h2_ct_accEC); // assured by update_extra
            tl_assert(xe->XE.Race.h2_ct_locksHeldW);
            emit( "\n" );
            emit( "This conflicts with a previous %s of size %d "
                  "by thread #%d\n",
                  xe->XE.Race.h2_ct_accIsW ? "write" : "read",
                  xe->XE.Race.h2_ct_accSzB,
                  xe->XE.Race.h2_ct->errmsg_index );
            show_LockP_summary_textmode( xe->XE.Race.h2_ct_locksHeldW, "" );
            VG_(pp_ExeContext)( xe->XE.Race.h2_ct_accEC );
         }

         if (xe->XE.Race.h1_ct) {
            emit( " This conflicts with a previous access by thread #%d, "
                  "after\n",
                  xe->XE.Race.h1_ct->errmsg_index );
            if (xe->XE.Race.h1_ct_mbsegstartEC) {
               VG_(pp_ExeContext)( xe->XE.Race.h1_ct_mbsegstartEC );
            } else {
               emit( "   (the start of the thread)\n" );
            }
            emit( " but before\n" );
            if (xe->XE.Race.h1_ct_mbsegendEC) {
               VG_(pp_ExeContext)( xe->XE.Race.h1_ct_mbsegendEC );
            } else {
               emit( "   (the end of the thread)\n" );
            }
         }

      }
      VG_(pp_addrinfo) (err_ga, &xe->XE.Race.data_addrinfo);
      break; /* case XE_Race */
   } /* case XE_Race */

   default:
      tl_assert(0);
   } /* switch (VG_(get_error_kind)(err)) */
}

const HChar* HG_(get_error_name) ( Error* err )
{
   switch (VG_(get_error_kind)(err)) {
      case XE_Race:           return "Race";
      case XE_UnlockUnlocked: return "UnlockUnlocked";
      case XE_UnlockForeign:  return "UnlockForeign";
      case XE_UnlockBogus:    return "UnlockBogus";
      case XE_PthAPIerror:    return "PthAPIerror";
      case XE_LockOrder:      return "LockOrder";
      case XE_Misc:           return "Misc";
      default: tl_assert(0); /* fill in missing case */
   }
}

Bool HG_(recognised_suppression) ( const HChar* name, Supp *su )
{
#  define TRY(_name,_xskind)                   \
      if (0 == VG_(strcmp)(name, (_name))) {   \
         VG_(set_supp_kind)(su, (_xskind));    \
         return True;                          \
      }
   TRY("Race",           XS_Race);
   TRY("FreeMemLock",    XS_FreeMemLock);
   TRY("UnlockUnlocked", XS_UnlockUnlocked);
   TRY("UnlockForeign",  XS_UnlockForeign);
   TRY("UnlockBogus",    XS_UnlockBogus);
   TRY("PthAPIerror",    XS_PthAPIerror);
   TRY("LockOrder",      XS_LockOrder);
   TRY("Misc",           XS_Misc);
   return False;
#  undef TRY
}
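
/* Editorial example: a suppression matching one of the kinds above
   would look like this in a suppressions file (the name and frames
   here are made up):

      {
         ignore-known-race-in-foo
         Helgrind:Race
         fun:foo
         fun:main
      }
*/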

Bool HG_(read_extra_suppression_info) ( Int fd, HChar** bufpp, SizeT* nBufp,
                                        Int* lineno, Supp* su )
{
   /* do nothing -- no extra suppression info present.  Return True to
      indicate nothing bad happened. */
   return True;
}

Bool HG_(error_matches_suppression) ( Error* err, Supp* su )
{
   switch (VG_(get_supp_kind)(su)) {
   case XS_Race:           return VG_(get_error_kind)(err) == XE_Race;
   case XS_UnlockUnlocked: return VG_(get_error_kind)(err) == XE_UnlockUnlocked;
   case XS_UnlockForeign:  return VG_(get_error_kind)(err) == XE_UnlockForeign;
   case XS_UnlockBogus:    return VG_(get_error_kind)(err) == XE_UnlockBogus;
   case XS_PthAPIerror:    return VG_(get_error_kind)(err) == XE_PthAPIerror;
   case XS_LockOrder:      return VG_(get_error_kind)(err) == XE_LockOrder;
   case XS_Misc:           return VG_(get_error_kind)(err) == XE_Misc;
   //case XS_: return VG_(get_error_kind)(err) == XE_;
   default: tl_assert(0); /* fill in missing cases */
   }
}

Bool HG_(get_extra_suppression_info) ( Error* err,
                                       /*OUT*/HChar* buf, Int nBuf )
{
   /* Do nothing */
   return False;
}

Bool HG_(print_extra_suppression_use) ( Supp* su,
                                        /*OUT*/HChar* buf, Int nBuf )
{
   /* Do nothing */
   return False;
}

void HG_(update_extra_suppression_use) ( Error* err, Supp* su )
{
   /* Do nothing */
   return;
}


/*--------------------------------------------------------------------*/
/*--- end                                              hg_errors.c ---*/
/*--------------------------------------------------------------------*/