
/*--------------------------------------------------------------------*/
/*--- Error management for Helgrind.                               ---*/
/*---                                                  hg_errors.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Helgrind, a Valgrind tool for detecting errors
   in threaded programs.

   Copyright (C) 2007-2017 OpenWorks Ltd
      info@open-works.co.uk

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "pub_tool_basics.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_stacktrace.h"
#include "pub_tool_execontext.h"
#include "pub_tool_errormgr.h"
#include "pub_tool_wordfm.h"
#include "pub_tool_xarray.h"
#include "pub_tool_debuginfo.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_options.h"     // VG_(clo_xml)
#include "pub_tool_aspacemgr.h"
#include "pub_tool_addrinfo.h"

#include "hg_basics.h"
#include "hg_addrdescr.h"
#include "hg_wordset.h"
#include "hg_lock_n_thread.h"
#include "libhb.h"
#include "hg_errors.h"            /* self */


/*----------------------------------------------------------------*/
/*--- Error management -- storage                              ---*/
/*----------------------------------------------------------------*/

/* maps (by value) strings to a copy of them in ARENA_TOOL */

static WordFM* string_table = NULL;

ULong HG_(stats__string_table_queries) = 0;

ULong HG_(stats__string_table_get_map_size) ( void ) {
   return string_table ? (ULong)VG_(sizeFM)(string_table) : 0;
}

static Word string_table_cmp ( UWord s1, UWord s2 ) {
   return (Word)VG_(strcmp)( (HChar*)s1, (HChar*)s2 );
}

static HChar* string_table_strdup ( const HChar* str ) {
   HChar* copy = NULL;
   HG_(stats__string_table_queries)++;
   if (!str)
      str = "(null)";
   if (!string_table) {
      string_table = VG_(newFM)( HG_(zalloc), "hg.sts.1",
                                 HG_(free), string_table_cmp );
   }
   if (VG_(lookupFM)( string_table,
                      NULL, (UWord*)&copy, (UWord)str )) {
      tl_assert(copy);
      if (0) VG_(printf)("string_table_strdup: %p -> %p\n", str, copy );
      return copy;
   } else {
      copy = HG_(strdup)("hg.sts.2", str);
      VG_(addToFM)( string_table, (UWord)copy, (UWord)copy );
      return copy;
   }
}
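
/* A sketch of the interning behaviour this gives us: two lookups of
   equal strings return the same tool-arena copy, e.g.

      HChar* a = string_table_strdup("pthread_mutex_lock");
      HChar* b = string_table_strdup("pthread_mutex_lock");
      tl_assert(a == b);  // second call finds the first copy in the map

   so the persistent XError string fields below (PthAPIerror.fnname,
   Misc.errstr, etc) can be stored without worrying about ownership or
   duplication. */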

/* maps from Lock .unique fields to LockP*s */

static WordFM* map_LockN_to_P = NULL;

ULong HG_(stats__LockN_to_P_queries) = 0;

ULong HG_(stats__LockN_to_P_get_map_size) ( void ) {
   return map_LockN_to_P ? (ULong)VG_(sizeFM)(map_LockN_to_P) : 0;
}

static Word lock_unique_cmp ( UWord lk1W, UWord lk2W )
{
   Lock* lk1 = (Lock*)lk1W;
   Lock* lk2 = (Lock*)lk2W;
   tl_assert( HG_(is_sane_LockNorP)(lk1) );
   tl_assert( HG_(is_sane_LockNorP)(lk2) );
   if (lk1->unique < lk2->unique) return -1;
   if (lk1->unique > lk2->unique) return 1;
   return 0;
}
/* Given a normal Lock (LockN), convert it to a persistent Lock
   (LockP).  In some cases the LockN could be invalid (if it's been
   freed), so we enquire, in hg_main.c's admin_locks list, whether it
   is in fact valid.  If allowed_to_be_invalid is True, then it's OK
   for the LockN to be invalid, in which case Lock_INVALID is
   returned.  In all other cases, we insist that the LockN is a valid
   lock, and return its corresponding LockP.

   Why can LockNs sometimes be invalid?  Because they are harvested
   from locksets that are attached to the OldRef info for conflicting
   threads.  By the time we detect a race, some of the elements of
   the lockset may have been destroyed by the client, in which case
   the corresponding Lock structures we maintain will have been freed.

   So we check that each LockN is a member of the admin_locks doubly
   linked list of all Lock structures.  That stops us prodding around
   in potentially freed-up Lock structures.  However, it's not quite a
   proper check: if a new Lock has been reallocated at the same
   address as one which was previously freed, we'll wind up copying
   the new one as the basis for the LockP, which is completely bogus
   because it is unrelated to the previous Lock that lived there.
   Let's hope that doesn't happen too often.
*/
static Lock* mk_LockP_from_LockN ( Lock* lkn,
                                   Bool allowed_to_be_invalid )
{
   Lock* lkp = NULL;
   HG_(stats__LockN_to_P_queries)++;

   /* First off, let's do some sanity checks.  If
      allowed_to_be_invalid is False, we _must_ be able to find 'lkn'
      in admin_locks; else we must assert.  If it is True, it's OK for
      it not to be findable, but in that case we must return
      Lock_INVALID right away. */
   Lock* lock_list = HG_(get_admin_locks)();
   while (lock_list) {
      if (lock_list == lkn)
         break;
      lock_list = lock_list->admin_next;
   }
   if (lock_list == NULL) {
      /* We didn't find it.  That possibility has to be OK'd by the
         caller. */
      tl_assert(allowed_to_be_invalid);
      return Lock_INVALID;
   }

   /* So we must be looking at a valid LockN. */
   tl_assert( HG_(is_sane_LockN)(lkn) );

   if (!map_LockN_to_P) {
      map_LockN_to_P = VG_(newFM)( HG_(zalloc), "hg.mLPfLN.1",
                                   HG_(free), lock_unique_cmp );
   }
   if (!VG_(lookupFM)( map_LockN_to_P, NULL, (UWord*)&lkp, (UWord)lkn)) {
      lkp = HG_(zalloc)( "hg.mLPfLN.2", sizeof(Lock) );
      *lkp = *lkn;
      lkp->admin_next = NULL;
      lkp->admin_prev = NULL;
      lkp->magic = LockP_MAGIC;
      /* Forget about the bag of lock holders - don't copy that.
         Also, acquired_at should be NULL whenever heldBy is, and vice
         versa.  Also forget about the associated libhb synch object. */
      lkp->heldW  = False;
      lkp->heldBy = NULL;
      lkp->acquired_at = NULL;
      lkp->hbso = NULL;
      VG_(addToFM)( map_LockN_to_P, (UWord)lkp, (UWord)lkp );
   }
   tl_assert( HG_(is_sane_LockP)(lkp) );
   return lkp;
}
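
/* (Within this file, the two modes are used as follows: the
   record_error_* functions below convert locks that must still be
   alive and pass False, while conversions of locksets recovered from
   the conflicting-access history, in HG_(update_extra) and
   HG_(print_access), pass True and so may yield Lock_INVALID
   entries.) */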

static Int sort_by_guestaddr(const void* n1, const void* n2)
{
   const Lock* l1 = *(const Lock *const *)n1;
   const Lock* l2 = *(const Lock *const *)n2;

   Addr a1 = l1 == Lock_INVALID ? 0 : l1->guestaddr;
   Addr a2 = l2 == Lock_INVALID ? 0 : l2->guestaddr;
   if (a1 < a2) return -1;
   if (a1 > a2) return 1;
   return 0;
}
/* Expand a WordSet of LockN*'s into a NULL-terminated vector of
   LockP*'s.  Any LockN's that can't be converted into a LockP
   (because they have been freed, see comment on mk_LockP_from_LockN)
   are converted instead into the value Lock_INVALID.  Hence the
   returned vector is a sequence: zero or more (valid LockP* or
   Lock_INVALID), terminated by a NULL. */
static
Lock** enumerate_WordSet_into_LockP_vector( WordSetU* univ_lsets,
                                            WordSetID lockset,
                                            Bool allowed_to_be_invalid )
{
   tl_assert(univ_lsets);
   tl_assert( HG_(plausibleWS)(univ_lsets, lockset) );
   UWord  nLocks = HG_(cardinalityWS)(univ_lsets, lockset);
   Lock** lockPs = HG_(zalloc)( "hg.eWSiLPa",
                                (nLocks+1) * sizeof(Lock*) );
   tl_assert(lockPs[nLocks] == NULL); /* pre-NULL terminated */
   UWord* lockNs  = NULL;
   UWord  nLockNs = 0;
   if (nLocks > 0)  {
      /* HG_(getPayloadWS) doesn't assign non-NULL to &lockNs if the
         lockset is empty; hence the guarding "if".  Sigh. */
      HG_(getPayloadWS)( &lockNs, &nLockNs, univ_lsets, lockset );
      tl_assert(lockNs);
   }
   UWord i;
   /* Convert to LockPs. */
   for (i = 0; i < nLockNs; i++) {
      lockPs[i] = mk_LockP_from_LockN( (Lock*)lockNs[i],
                                       allowed_to_be_invalid );
   }
   /* Sort the locks by increasing Lock::guestaddr to avoid jitters
      in the output. */
   VG_(ssort)(lockPs, nLockNs, sizeof lockPs[0], sort_by_guestaddr);

   return lockPs;
}
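
/* For instance, converting a three-element lockset of which one
   LockN has since been freed (with allowed_to_be_invalid == True)
   produces a vector shaped like

      { Lock_INVALID, lkP_a, lkP_b, NULL }

   where Lock_INVALID sorts first because sort_by_guestaddr treats it
   as guest address 0. */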

/* Get the number of useful elements in a vector created by
   enumerate_WordSet_into_LockP_vector.  Returns both the total number
   of elements (not including the terminating NULL) and the number of
   non-Lock_INVALID elements. */
static void count_LockP_vector ( /*OUT*/UWord* nLocks,
                                 /*OUT*/UWord* nLocksValid,
                                 Lock** vec )
{
   tl_assert(vec);
   *nLocks = *nLocksValid = 0;
   UWord n = 0;
   while (vec[n]) {
      (*nLocks)++;
      if (vec[n] != Lock_INVALID)
         (*nLocksValid)++;
      n++;
   }
}
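
/* E.g. for the vector { Lock_INVALID, lkP_a, lkP_b, NULL } above,
   this sets *nLocks to 3 and *nLocksValid to 2. */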

/* Find out whether 'lk' is in 'vec'. */
static Bool elem_LockP_vector ( Lock** vec, Lock* lk )
{
   tl_assert(vec);
   tl_assert(lk);
   UWord n = 0;
   while (vec[n]) {
      if (vec[n] == lk)
         return True;
      n++;
   }
   return False;
}


/* Errors:

      race: program counter
            read or write
            data size
            previous state
            current state

      FIXME: how does state printing interact with lockset gc?
      Are the locksets in prev/curr state always valid?
      Ditto question for the threadsets
          ThreadSets - probably are always valid if Threads
          are never thrown away.
          LockSets - could at least print the lockset elements that
          correspond to actual locks at the time of printing.  Hmm.
*/

/* Error kinds */
typedef
   enum {
      XE_Race=1101,      // race
      XE_UnlockUnlocked, // unlocking a not-locked lock
      XE_UnlockForeign,  // unlocking a lock held by some other thread
      XE_UnlockBogus,    // unlocking an address not known to be a lock
      XE_PthAPIerror,    // error from the POSIX pthreads API
      XE_LockOrder,      // lock order error
      XE_Misc            // misc other error (w/ string to describe it)
   }
   XErrorTag;

/* Extra contexts for kinds */
typedef
   struct  {
      XErrorTag tag;
      union {
         struct {
            Addr        data_addr;
            Int         szB;
            AddrInfo    data_addrinfo;
            Bool        isWrite;
            Thread*     thr;
            Lock**      locksHeldW;
            /* h1_* and h2_* provide some description of a previously
               observed access with which we are conflicting. */
            Thread*     h1_ct; /* non-NULL means h1 info present */
            ExeContext* h1_ct_mbsegstartEC;
            ExeContext* h1_ct_mbsegendEC;
            Thread*     h2_ct; /* non-NULL means h2 info present */
            ExeContext* h2_ct_accEC;
            Int         h2_ct_accSzB;
            Bool        h2_ct_accIsW;
            Lock**      h2_ct_locksHeldW;
         } Race;
         struct {
            Thread* thr;  /* doing the unlocking */
            Lock*   lock; /* lock (that is already unlocked) */
         } UnlockUnlocked;
         struct {
            Thread* thr;    /* doing the unlocking */
            Thread* owner;  /* thread that actually holds the lock */
            Lock*   lock;   /* lock (that is held by 'owner') */
         } UnlockForeign;
         struct {
            Thread* thr;     /* doing the unlocking */
            Addr    lock_ga; /* purported address of the lock */
         } UnlockBogus;
         struct {
            Thread* thr;
            HChar*  fnname; /* persistent, in tool-arena */
            Word    err;    /* pth error code */
            HChar*  errstr; /* persistent, in tool-arena */
         } PthAPIerror;
         struct {
            Thread*     thr;
            /* The first 4 fields describe the previously observed
               (should-be) ordering. */
            Lock*       shouldbe_earlier_lk;
            Lock*       shouldbe_later_lk;
            ExeContext* shouldbe_earlier_ec;
            ExeContext* shouldbe_later_ec;
            /* In principle we need to record two more stacks, from
               this thread, when acquiring the locks in the "wrong"
               order.  In fact the wallclock-later acquisition by this
               thread is recorded in the main stack for this error.
               So we only need a stack for the earlier acquisition by
               this thread. */
            ExeContext* actual_earlier_ec;
         } LockOrder;
         struct {
            Thread*     thr;
            HChar*      errstr; /* persistent, in tool-arena */
            HChar*      auxstr; /* optional, persistent, in tool-arena */
            ExeContext* auxctx; /* optional */
         } Misc;
      } XE;
   }
   XError;

static void init_XError ( XError* xe ) {
   VG_(memset)(xe, 0, sizeof(*xe) );
   xe->tag = XE_Race-1; /* bogus */
}


/* Extensions of suppressions */
typedef
   enum {
      XS_Race=1201, /* race */
      XS_FreeMemLock,
      XS_UnlockUnlocked,
      XS_UnlockForeign,
      XS_UnlockBogus,
      XS_PthAPIerror,
      XS_LockOrder,
      XS_Misc
   }
   XSuppTag;


/* Updates the copy with address info if necessary. */
UInt HG_(update_extra) ( const Error* err )
{
   XError* xe = (XError*)VG_(get_error_extra)(err);
   tl_assert(xe);
   //if (extra != NULL && Undescribed == extra->addrinfo.akind) {
   //   describe_addr ( VG_(get_error_address)(err), &(extra->addrinfo) );
   //}

   if (xe->tag == XE_Race) {

      /* Note the set of locks that the thread is (w-)holding.
         Convert the WordSetID of LockN*'s into a NULL-terminated
         vector of LockP*'s.  We don't expect to encounter any invalid
         LockNs in this conversion. */
      tl_assert(xe->XE.Race.thr);
      xe->XE.Race.locksHeldW
         = enumerate_WordSet_into_LockP_vector(
              HG_(get_univ_lsets)(),
              xe->XE.Race.thr->locksetW,
              False/*!allowed_to_be_invalid*/
           );

      /* See if we can come up with a source level description of the
         raced-upon address.  This is potentially expensive, which is
         why it's only done at the update_extra point, not when the
         error is initially created. */
      static Int xxx = 0;
      xxx++;
      if (0)
         VG_(printf)("HG_(update_extra): "
                     "%d conflicting-event queries\n", xxx);

      HG_(describe_addr) (xe->XE.Race.data_addr, &xe->XE.Race.data_addrinfo);

      /* And poke around in the conflicting-event map, to see if we
         can rustle up a plausible-looking conflicting memory access
         to show. */
      if (HG_(clo_history_level) >= 2) {
         Thr*        thrp            = NULL;
         ExeContext* wherep          = NULL;
         Addr        acc_addr        = xe->XE.Race.data_addr;
         Int         acc_szB         = xe->XE.Race.szB;
         Thr*        acc_thr         = xe->XE.Race.thr->hbthr;
         Bool        acc_isW         = xe->XE.Race.isWrite;
         SizeT       conf_szB        = 0;
         Bool        conf_isW        = False;
         WordSetID   conf_locksHeldW = 0;
         tl_assert(!xe->XE.Race.h2_ct_accEC);
         tl_assert(!xe->XE.Race.h2_ct);
         if (libhb_event_map_lookup(
                &wherep, &thrp, &conf_szB, &conf_isW, &conf_locksHeldW,
                acc_thr, acc_addr, acc_szB, acc_isW )) {
            Thread* threadp;
            tl_assert(wherep);
            tl_assert(thrp);
            threadp = libhb_get_Thr_hgthread( thrp );
            tl_assert(threadp);
            xe->XE.Race.h2_ct_accEC  = wherep;
            xe->XE.Race.h2_ct        = threadp;
            xe->XE.Race.h2_ct_accSzB = (Int)conf_szB;
            xe->XE.Race.h2_ct_accIsW = conf_isW;
            xe->XE.Race.h2_ct_locksHeldW
               = enumerate_WordSet_into_LockP_vector(
                    HG_(get_univ_lsets)(),
                    conf_locksHeldW,
                    True/*allowed_to_be_invalid*/
                 );
         }
      }

      // both NULL or both non-NULL
      tl_assert( (!!xe->XE.Race.h2_ct) == (!!xe->XE.Race.h2_ct_accEC) );
   }

   return sizeof(XError);
}
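
/* (The core calls HG_(update_extra) only for errors it has decided to
   keep, so the expensive work above, the address description and the
   conflicting-access lookup, is not wasted on errors that get
   discarded as duplicates of already-recorded ones.) */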

void HG_(record_error_Race) ( Thread* thr,
                              Addr data_addr, Int szB, Bool isWrite,
                              Thread* h1_ct,
                              ExeContext* h1_ct_segstart,
                              ExeContext* h1_ct_mbsegendEC )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );

#  if defined(VGO_linux)
   /* Skip any races on locations apparently in GOTPLT sections.  This
      is said to be caused by ld.so poking PLT table entries (or
      whatever) when it writes the resolved address of a dynamically
      linked routine, into the table (or whatever) when it is called
      for the first time. */
   {
     VgSectKind sect = VG_(DebugInfo_sect_kind)( NULL, data_addr );
     if (0) VG_(printf)("XXXXXXXXX RACE on %#lx %s\n",
                        data_addr, VG_(pp_SectKind)(sect));
     /* SectGOTPLT is required on ???-linux */
     if (sect == Vg_SectGOTPLT) return;
     /* SectPLT is required on ppc32/64-linux */
     if (sect == Vg_SectPLT) return;
     /* SectGOT is required on arm-linux */
     if (sect == Vg_SectGOT) return;
   }
#  endif

   init_XError(&xe);
   xe.tag = XE_Race;
   xe.XE.Race.data_addr   = data_addr;
   xe.XE.Race.szB         = szB;
   xe.XE.Race.isWrite     = isWrite;
   xe.XE.Race.thr         = thr;
   tl_assert(isWrite == False || isWrite == True);
   tl_assert(szB == 8 || szB == 4 || szB == 2 || szB == 1);
   /* Skip on the detailed description of the raced-on address at this
      point; it's expensive.  Leave it for the update_extra function
      if we ever make it that far. */
   xe.XE.Race.data_addrinfo.tag = Addr_Undescribed;
   // FIXME: tid vs thr
   // Skip on any of the conflicting-access info at this point.
   // It's expensive to obtain, and this error is more likely than
   // not to be discarded.  We'll fill these fields in in
   // HG_(update_extra) just above, assuming the error ever makes
   // it that far (unlikely).
   xe.XE.Race.h2_ct_accSzB = 0;
   xe.XE.Race.h2_ct_accIsW = False;
   xe.XE.Race.h2_ct_accEC  = NULL;
   xe.XE.Race.h2_ct        = NULL;
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );

   xe.XE.Race.h1_ct              = h1_ct;
   xe.XE.Race.h1_ct_mbsegstartEC = h1_ct_segstart;
   xe.XE.Race.h1_ct_mbsegendEC   = h1_ct_mbsegendEC;

   VG_(maybe_record_error)( thr->coretid,
                            XE_Race, data_addr, NULL, &xe );
}

void HG_(record_error_UnlockUnlocked) ( Thread* thr, Lock* lk )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );
   init_XError(&xe);
   xe.tag = XE_UnlockUnlocked;
   xe.XE.UnlockUnlocked.thr
      = thr;
   xe.XE.UnlockUnlocked.lock
      = mk_LockP_from_LockN(lk, False/*!allowed_to_be_invalid*/);
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_UnlockUnlocked, 0, NULL, &xe );
}

void HG_(record_error_UnlockForeign) ( Thread* thr,
                                       Thread* owner, Lock* lk )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert( HG_(is_sane_Thread)(owner) );
   tl_assert( HG_(is_sane_LockN)(lk) );
   init_XError(&xe);
   xe.tag = XE_UnlockForeign;
   xe.XE.UnlockForeign.thr   = thr;
   xe.XE.UnlockForeign.owner = owner;
   xe.XE.UnlockForeign.lock
      = mk_LockP_from_LockN(lk, False/*!allowed_to_be_invalid*/);
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_UnlockForeign, 0, NULL, &xe );
}

void HG_(record_error_UnlockBogus) ( Thread* thr, Addr lock_ga )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   init_XError(&xe);
   xe.tag = XE_UnlockBogus;
   xe.XE.UnlockBogus.thr     = thr;
   xe.XE.UnlockBogus.lock_ga = lock_ga;
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_UnlockBogus, 0, NULL, &xe );
}

void HG_(record_error_LockOrder)(
        Thread*     thr,
        Lock*       shouldbe_earlier_lk,
        Lock*       shouldbe_later_lk,
        ExeContext* shouldbe_earlier_ec,
        ExeContext* shouldbe_later_ec,
        ExeContext* actual_earlier_ec
     )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert(HG_(clo_track_lockorders));
   init_XError(&xe);
   xe.tag = XE_LockOrder;
   xe.XE.LockOrder.thr       = thr;
   xe.XE.LockOrder.shouldbe_earlier_lk
      = mk_LockP_from_LockN(shouldbe_earlier_lk,
                            False/*!allowed_to_be_invalid*/);
   xe.XE.LockOrder.shouldbe_earlier_ec = shouldbe_earlier_ec;
   xe.XE.LockOrder.shouldbe_later_lk
      = mk_LockP_from_LockN(shouldbe_later_lk,
                            False/*!allowed_to_be_invalid*/);
   xe.XE.LockOrder.shouldbe_later_ec   = shouldbe_later_ec;
   xe.XE.LockOrder.actual_earlier_ec   = actual_earlier_ec;
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_LockOrder, 0, NULL, &xe );
}

void HG_(record_error_PthAPIerror) ( Thread* thr, const HChar* fnname,
                                     Word err, const HChar* errstr )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert(fnname);
   tl_assert(errstr);
   init_XError(&xe);
   xe.tag = XE_PthAPIerror;
   xe.XE.PthAPIerror.thr    = thr;
   xe.XE.PthAPIerror.fnname = string_table_strdup(fnname);
   xe.XE.PthAPIerror.err    = err;
   xe.XE.PthAPIerror.errstr = string_table_strdup(errstr);
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_PthAPIerror, 0, NULL, &xe );
}

void HG_(record_error_Misc_w_aux) ( Thread* thr, const HChar* errstr,
                                    const HChar* auxstr, ExeContext* auxctx )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert(errstr);
   init_XError(&xe);
   xe.tag = XE_Misc;
   xe.XE.Misc.thr    = thr;
   xe.XE.Misc.errstr = string_table_strdup(errstr);
   xe.XE.Misc.auxstr = auxstr ? string_table_strdup(auxstr) : NULL;
   xe.XE.Misc.auxctx = auxctx;
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_Misc, 0, NULL, &xe );
}

void HG_(record_error_Misc) ( Thread* thr, const HChar* errstr )
{
   HG_(record_error_Misc_w_aux)(thr, errstr, NULL, NULL);
}

Bool HG_(eq_Error) ( VgRes not_used, const Error* e1, const Error* e2 )
{
   XError *xe1, *xe2;

   tl_assert(VG_(get_error_kind)(e1) == VG_(get_error_kind)(e2));

   xe1 = (XError*)VG_(get_error_extra)(e1);
   xe2 = (XError*)VG_(get_error_extra)(e2);
   tl_assert(xe1);
   tl_assert(xe2);

   switch (VG_(get_error_kind)(e1)) {
      case XE_Race:
         return xe1->XE.Race.szB == xe2->XE.Race.szB
                && xe1->XE.Race.isWrite == xe2->XE.Race.isWrite
                && (HG_(clo_cmp_race_err_addrs)
                       ? xe1->XE.Race.data_addr == xe2->XE.Race.data_addr
                       : True);
      case XE_UnlockUnlocked:
         return xe1->XE.UnlockUnlocked.thr == xe2->XE.UnlockUnlocked.thr
                && xe1->XE.UnlockUnlocked.lock == xe2->XE.UnlockUnlocked.lock;
      case XE_UnlockForeign:
         return xe1->XE.UnlockForeign.thr == xe2->XE.UnlockForeign.thr
                && xe1->XE.UnlockForeign.owner == xe2->XE.UnlockForeign.owner
                && xe1->XE.UnlockForeign.lock == xe2->XE.UnlockForeign.lock;
      case XE_UnlockBogus:
         return xe1->XE.UnlockBogus.thr == xe2->XE.UnlockBogus.thr
                && xe1->XE.UnlockBogus.lock_ga == xe2->XE.UnlockBogus.lock_ga;
      case XE_PthAPIerror:
         return xe1->XE.PthAPIerror.thr == xe2->XE.PthAPIerror.thr
                && 0==VG_(strcmp)(xe1->XE.PthAPIerror.fnname,
                                  xe2->XE.PthAPIerror.fnname)
                && xe1->XE.PthAPIerror.err == xe2->XE.PthAPIerror.err;
      case XE_LockOrder:
         return xe1->XE.LockOrder.thr == xe2->XE.LockOrder.thr;
      case XE_Misc:
         return xe1->XE.Misc.thr == xe2->XE.Misc.thr
                && 0==VG_(strcmp)(xe1->XE.Misc.errstr, xe2->XE.Misc.errstr);
      default:
         tl_assert(0);
   }

   /*NOTREACHED*/
   tl_assert(0);
}
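
/* (Note that the XE_Race case deliberately ignores the raced-on
   address unless --cmp-race-err-addrs=yes was given, and never
   compares the threads involved: races with the same access size and
   direction at the same reporting point are folded into one report.) */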


/*----------------------------------------------------------------*/
/*--- Error management -- printing                             ---*/
/*----------------------------------------------------------------*/

/* Do a printf-style operation on either the XML or normal output
   channel, depending on the setting of VG_(clo_xml).
*/
static void emit_WRK ( const HChar* format, va_list vargs )
{
   if (VG_(clo_xml)) {
      VG_(vprintf_xml)(format, vargs);
   } else {
      VG_(vmessage)(Vg_UserMsg, format, vargs);
   }
}
static void emit ( const HChar* format, ... ) PRINTF_CHECK(1, 2);
static void emit ( const HChar* format, ... )
{
   va_list vargs;
   va_start(vargs, format);
   emit_WRK(format, vargs);
   va_end(vargs);
}
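
/* So e.g. emit("Thread #%d: %s\n", ...) below goes to the XML stream
   under --xml=yes and to the ordinary user-message stream otherwise;
   callers still choose mode-appropriate format strings, but don't
   have to select the channel themselves. */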


/* Announce (that is, print the point-of-creation) of 'thr'.  Only do
   this once, as we only want to see these announcements once per
   thread.  Returned Bool indicates whether or not an announcement was
   made.
*/
static Bool announce_one_thread ( Thread* thr )
{
   tl_assert(HG_(is_sane_Thread)(thr));
   tl_assert(thr->errmsg_index >= 1);
   if (thr->announced)
      return False;

   if (VG_(clo_xml)) {

      VG_(printf_xml)("<announcethread>\n");
      VG_(printf_xml)("  <hthreadid>%d</hthreadid>\n", thr->errmsg_index);
      if (thr->errmsg_index == 1) {
         tl_assert(thr->created_at == NULL);
         VG_(printf_xml)("  <isrootthread></isrootthread>\n");
      } else {
         tl_assert(thr->created_at != NULL);
         VG_(pp_ExeContext)( thr->created_at );
      }
      VG_(printf_xml)("</announcethread>\n\n");

   } else {

      VG_(umsg)("---Thread-Announcement----------"
                "--------------------------------" "\n");
      VG_(umsg)("\n");

      if (thr->errmsg_index == 1) {
         tl_assert(thr->created_at == NULL);
         VG_(message)(Vg_UserMsg,
                      "Thread #%d is the program's root thread\n",
                       thr->errmsg_index);
      } else {
         tl_assert(thr->created_at != NULL);
         VG_(message)(Vg_UserMsg, "Thread #%d was created\n",
                                  thr->errmsg_index);
         VG_(pp_ExeContext)( thr->created_at );
      }
      VG_(message)(Vg_UserMsg, "\n");

   }

   thr->announced = True;
   return True;
}
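
/* In text mode an announcement looks like (stack trace illustrative):

      ---Thread-Announcement------------------------------------------

      Thread #2 was created
         ... (stack trace of the creation point) ...
*/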

/* Announce 'lk'. */
static void announce_LockP ( Lock* lk )
{
   tl_assert(lk);
   if (lk == Lock_INVALID)
      return; /* Can't be announced -- we know nothing about it. */
   tl_assert(lk->magic == LockP_MAGIC);

   if (VG_(clo_xml)) {
      if (lk->appeared_at) {
         emit( "  <auxwhat>Lock at %p was first observed</auxwhat>\n",
               (void*)lk->guestaddr );
         VG_(pp_ExeContext)( lk->appeared_at );
      }

   } else {
      if (lk->appeared_at) {
         VG_(umsg)( " Lock at %p was first observed\n",
                    (void*)lk->guestaddr );
         VG_(pp_ExeContext)( lk->appeared_at );
      } else {
         VG_(umsg)( " Lock at %p : no stacktrace for first observation\n",
                    (void*)lk->guestaddr );
      }
      HG_(get_and_pp_addrdescr) (lk->guestaddr);
      VG_(umsg)("\n");
   }
}

/* Announce (that is, print point-of-first-observation) for the
   locks in 'lockvec' and, if non-NULL, 'lockvec2'. */
static void announce_combined_LockP_vecs ( Lock** lockvec,
                                           Lock** lockvec2 )
{
   UWord i;
   tl_assert(lockvec);
   for (i = 0; lockvec[i]; i++) {
      announce_LockP(lockvec[i]);
   }
   if (lockvec2) {
      for (i = 0; lockvec2[i]; i++) {
         Lock* lk = lockvec2[i];
         if (!elem_LockP_vector(lockvec, lk))
            announce_LockP(lk);
      }
   }
}


static void show_LockP_summary_textmode ( Lock** locks, const HChar* pre )
{
   tl_assert(locks);
   UWord i;
   UWord nLocks = 0, nLocksValid = 0;
   count_LockP_vector(&nLocks, &nLocksValid, locks);
   tl_assert(nLocksValid <= nLocks);

   if (nLocks == 0) {
      VG_(umsg)( "%sLocks held: none", pre );
   } else {
      VG_(umsg)( "%sLocks held: %lu, at address%s ",
                 pre, nLocks, nLocksValid == 1 ? "" : "es" );
   }

   if (nLocks > 0) {
      for (i = 0; i < nLocks; i++) {
         if (locks[i] == Lock_INVALID)
            continue;
         VG_(umsg)( "%p", (void*)locks[i]->guestaddr);
         if (locks[i+1] != NULL)
            VG_(umsg)(" ");
      }
      if (nLocksValid < nLocks)
         VG_(umsg)(" (and %lu that can't be shown)", nLocks - nLocksValid);
   }
   VG_(umsg)("\n");
}
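
/* Sample text-mode output (addresses illustrative):

      Locks held: 3, at addresses 0x4C3B040 0x4C3B0A0 (and 1 that can't be shown)
*/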


/* This is the "this error is due to be printed shortly; so have a
   look at it and print any preamble you want" function.  We use it to
   announce any previously un-announced threads in the upcoming error
   message.
*/
void HG_(before_pp_Error) ( const Error* err )
{
   XError* xe;
   tl_assert(err);
   xe = (XError*)VG_(get_error_extra)(err);
   tl_assert(xe);

   switch (VG_(get_error_kind)(err)) {
      case XE_Misc:
         announce_one_thread( xe->XE.Misc.thr );
         break;
      case XE_LockOrder:
         announce_one_thread( xe->XE.LockOrder.thr );
         break;
      case XE_PthAPIerror:
         announce_one_thread( xe->XE.PthAPIerror.thr );
         break;
      case XE_UnlockBogus:
         announce_one_thread( xe->XE.UnlockBogus.thr );
         break;
      case XE_UnlockForeign:
         announce_one_thread( xe->XE.UnlockForeign.thr );
         announce_one_thread( xe->XE.UnlockForeign.owner );
         break;
      case XE_UnlockUnlocked:
         announce_one_thread( xe->XE.UnlockUnlocked.thr );
         break;
      case XE_Race:
         announce_one_thread( xe->XE.Race.thr );
         if (xe->XE.Race.h2_ct)
            announce_one_thread( xe->XE.Race.h2_ct );
         if (xe->XE.Race.h1_ct)
            announce_one_thread( xe->XE.Race.h1_ct );
         if (xe->XE.Race.data_addrinfo.Addr.Block.alloc_tinfo.tnr) {
            Thread* thr = get_admin_threads();
            while (thr) {
               if (thr->errmsg_index
                   == xe->XE.Race.data_addrinfo.Addr.Block.alloc_tinfo.tnr) {
                  announce_one_thread (thr);
                  break;
               }
               thr = thr->admin;
            }
         }
         break;
      default:
         tl_assert(0);
   }
}

void HG_(pp_Error) ( const Error* err )
{
   const Bool xml = VG_(clo_xml); /* a shorthand, that's all */

   if (!xml) {
      VG_(umsg)("--------------------------------"
                "--------------------------------" "\n");
      VG_(umsg)("\n");
   }

   XError *xe = (XError*)VG_(get_error_extra)(err);
   tl_assert(xe);

   if (xml)
      emit( "  <kind>%s</kind>\n", HG_(get_error_name)(err));

   switch (VG_(get_error_kind)(err)) {

   case XE_Misc: {
      tl_assert( HG_(is_sane_Thread)( xe->XE.Misc.thr ) );

      if (xml) {

         emit( "  <xwhat>\n" );
         emit( "    <text>Thread #%d: %s</text>\n",
               (Int)xe->XE.Misc.thr->errmsg_index,
               xe->XE.Misc.errstr );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.Misc.thr->errmsg_index );
         emit( "  </xwhat>\n" );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         if (xe->XE.Misc.auxstr) {
            emit("  <auxwhat>%s</auxwhat>\n", xe->XE.Misc.auxstr);
            if (xe->XE.Misc.auxctx)
               VG_(pp_ExeContext)( xe->XE.Misc.auxctx );
         }

      } else {

         emit( "Thread #%d: %s\n",
               (Int)xe->XE.Misc.thr->errmsg_index,
               xe->XE.Misc.errstr );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         if (xe->XE.Misc.auxstr) {
            emit(" %s\n", xe->XE.Misc.auxstr);
            if (xe->XE.Misc.auxctx)
               VG_(pp_ExeContext)( xe->XE.Misc.auxctx );
         }

      }
      break;
   }

   case XE_LockOrder: {
      tl_assert( HG_(is_sane_Thread)( xe->XE.LockOrder.thr ) );

      if (xml) {

         emit( "  <xwhat>\n" );
         emit( "    <text>Thread #%d: lock order \"%p before %p\" "
                    "violated</text>\n",
               (Int)xe->XE.LockOrder.thr->errmsg_index,
               (void*)xe->XE.LockOrder.shouldbe_earlier_lk->guestaddr,
               (void*)xe->XE.LockOrder.shouldbe_later_lk->guestaddr );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.LockOrder.thr->errmsg_index );
         emit( "  </xwhat>\n" );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         if (xe->XE.LockOrder.shouldbe_earlier_ec
             && xe->XE.LockOrder.shouldbe_later_ec) {
            emit( "  <auxwhat>Required order was established by "
                  "acquisition of lock at %p</auxwhat>\n",
                  (void*)xe->XE.LockOrder.shouldbe_earlier_lk->guestaddr );
            VG_(pp_ExeContext)( xe->XE.LockOrder.shouldbe_earlier_ec );
            emit( "  <auxwhat>followed by a later acquisition "
                  "of lock at %p</auxwhat>\n",
                  (void*)xe->XE.LockOrder.shouldbe_later_lk->guestaddr );
            VG_(pp_ExeContext)( xe->XE.LockOrder.shouldbe_later_ec );
         }
         announce_LockP ( xe->XE.LockOrder.shouldbe_earlier_lk );
         announce_LockP ( xe->XE.LockOrder.shouldbe_later_lk );

      } else {

         emit( "Thread #%d: lock order \"%p before %p\" violated\n",
               (Int)xe->XE.LockOrder.thr->errmsg_index,
               (void*)xe->XE.LockOrder.shouldbe_earlier_lk->guestaddr,
               (void*)xe->XE.LockOrder.shouldbe_later_lk->guestaddr );
         emit( "\n" );
         emit( "Observed (incorrect) order is: "
               "acquisition of lock at %p\n",
               (void*)xe->XE.LockOrder.shouldbe_later_lk->guestaddr);
         if (xe->XE.LockOrder.actual_earlier_ec) {
             VG_(pp_ExeContext)(xe->XE.LockOrder.actual_earlier_ec);
         } else {
            emit("   (stack unavailable)\n");
         }
         emit( "\n" );
         emit(" followed by a later acquisition of lock at %p\n",
              (void*)xe->XE.LockOrder.shouldbe_earlier_lk->guestaddr);
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         if (xe->XE.LockOrder.shouldbe_earlier_ec
             && xe->XE.LockOrder.shouldbe_later_ec) {
            emit("\n");
            emit( "Required order was established by "
                  "acquisition of lock at %p\n",
                  (void*)xe->XE.LockOrder.shouldbe_earlier_lk->guestaddr );
            VG_(pp_ExeContext)( xe->XE.LockOrder.shouldbe_earlier_ec );
            emit( "\n" );
            emit( " followed by a later acquisition of lock at %p\n",
                  (void*)xe->XE.LockOrder.shouldbe_later_lk->guestaddr );
            VG_(pp_ExeContext)( xe->XE.LockOrder.shouldbe_later_ec );
         }
         emit("\n");
         announce_LockP ( xe->XE.LockOrder.shouldbe_earlier_lk );
         announce_LockP ( xe->XE.LockOrder.shouldbe_later_lk );

      }

      break;
   }

   case XE_PthAPIerror: {
      tl_assert( HG_(is_sane_Thread)( xe->XE.PthAPIerror.thr ) );

      if (xml) {

         emit( "  <xwhat>\n" );
         emit(
            "    <text>Thread #%d's call to %pS failed</text>\n",
            (Int)xe->XE.PthAPIerror.thr->errmsg_index,
            xe->XE.PthAPIerror.fnname );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.PthAPIerror.thr->errmsg_index );
         emit( "  </xwhat>\n" );
         emit( "  <what>with error code %ld (%s)</what>\n",
               xe->XE.PthAPIerror.err, xe->XE.PthAPIerror.errstr );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );

      } else {

         emit( "Thread #%d's call to %pS failed\n",
                      (Int)xe->XE.PthAPIerror.thr->errmsg_index,
                      xe->XE.PthAPIerror.fnname );
         emit( "   with error code %ld (%s)\n",
               xe->XE.PthAPIerror.err, xe->XE.PthAPIerror.errstr );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );

      }

      break;
   }

   case XE_UnlockBogus: {
      tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockBogus.thr ) );

      if (xml) {

         emit( "  <xwhat>\n" );
         emit( "    <text>Thread #%d unlocked an invalid "
                    "lock at %p</text>\n",
               (Int)xe->XE.UnlockBogus.thr->errmsg_index,
               (void*)xe->XE.UnlockBogus.lock_ga );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.UnlockBogus.thr->errmsg_index );
         emit( "  </xwhat>\n" );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );

      } else {

         emit( "Thread #%d unlocked an invalid lock at %p\n",
               (Int)xe->XE.UnlockBogus.thr->errmsg_index,
               (void*)xe->XE.UnlockBogus.lock_ga );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );

      }

      break;
   }

   case XE_UnlockForeign: {
      tl_assert( HG_(is_sane_LockP)( xe->XE.UnlockForeign.lock ) );
      tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockForeign.owner ) );
      tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockForeign.thr ) );

      if (xml) {

         emit( "  <xwhat>\n" );
         emit( "    <text>Thread #%d unlocked lock at %p "
                    "currently held by thread #%d</text>\n",
               (Int)xe->XE.UnlockForeign.thr->errmsg_index,
               (void*)xe->XE.UnlockForeign.lock->guestaddr,
               (Int)xe->XE.UnlockForeign.owner->errmsg_index );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.UnlockForeign.thr->errmsg_index );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.UnlockForeign.owner->errmsg_index );
         emit( "  </xwhat>\n" );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         announce_LockP ( xe->XE.UnlockForeign.lock );

      } else {

         emit( "Thread #%d unlocked lock at %p "
               "currently held by thread #%d\n",
               (Int)xe->XE.UnlockForeign.thr->errmsg_index,
               (void*)xe->XE.UnlockForeign.lock->guestaddr,
               (Int)xe->XE.UnlockForeign.owner->errmsg_index );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         announce_LockP ( xe->XE.UnlockForeign.lock );

      }

      break;
   }

   case XE_UnlockUnlocked: {
      tl_assert( HG_(is_sane_LockP)( xe->XE.UnlockUnlocked.lock ) );
      tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockUnlocked.thr ) );

      if (xml) {

         emit( "  <xwhat>\n" );
         emit( "    <text>Thread #%d unlocked a "
                    "not-locked lock at %p</text>\n",
               (Int)xe->XE.UnlockUnlocked.thr->errmsg_index,
               (void*)xe->XE.UnlockUnlocked.lock->guestaddr );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.UnlockUnlocked.thr->errmsg_index );
         emit( "  </xwhat>\n" );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         announce_LockP ( xe->XE.UnlockUnlocked.lock);

      } else {

         emit( "Thread #%d unlocked a not-locked lock at %p\n",
               (Int)xe->XE.UnlockUnlocked.thr->errmsg_index,
               (void*)xe->XE.UnlockUnlocked.lock->guestaddr );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         announce_LockP ( xe->XE.UnlockUnlocked.lock);

      }

      break;
   }

   case XE_Race: {
      Addr      err_ga;
      const HChar* what;
      Int       szB;
      what      = xe->XE.Race.isWrite ? "write" : "read";
      szB       = xe->XE.Race.szB;
      err_ga = VG_(get_error_address)(err);

      tl_assert( HG_(is_sane_Thread)( xe->XE.Race.thr ));
      if (xe->XE.Race.h2_ct)
         tl_assert( HG_(is_sane_Thread)( xe->XE.Race.h2_ct ));

      if (xml) {

         /* ------ XML ------ */
         emit( "  <xwhat>\n" );
         emit( "    <text>Possible data race during %s of size %d "
                    "at %p by thread #%d</text>\n",
               what, szB, (void*)err_ga, (Int)xe->XE.Race.thr->errmsg_index );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.Race.thr->errmsg_index );
         emit( "  </xwhat>\n" );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );

         if (xe->XE.Race.h2_ct) {
            tl_assert(xe->XE.Race.h2_ct_accEC); // assured by update_extra
            emit( "  <xauxwhat>\n");
            emit( "    <text>This conflicts with a previous %s of size %d "
                            "by thread #%d</text>\n",
                  xe->XE.Race.h2_ct_accIsW ? "write" : "read",
                  xe->XE.Race.h2_ct_accSzB,
                  xe->XE.Race.h2_ct->errmsg_index );
            emit( "    <hthreadid>%d</hthreadid>\n",
                  xe->XE.Race.h2_ct->errmsg_index);
            emit("  </xauxwhat>\n");
            VG_(pp_ExeContext)( xe->XE.Race.h2_ct_accEC );
         }

         if (xe->XE.Race.h1_ct) {
            emit( "  <xauxwhat>\n");
            emit( "    <text>This conflicts with a previous access "
                  "by thread #%d, after</text>\n",
                  xe->XE.Race.h1_ct->errmsg_index );
            emit( "    <hthreadid>%d</hthreadid>\n",
                  xe->XE.Race.h1_ct->errmsg_index );
            emit("  </xauxwhat>\n");
            if (xe->XE.Race.h1_ct_mbsegstartEC) {
               VG_(pp_ExeContext)( xe->XE.Race.h1_ct_mbsegstartEC );
            } else {
               emit( "  <auxwhat>(the start of the thread)</auxwhat>\n" );
            }
            emit( "  <auxwhat>but before</auxwhat>\n" );
            if (xe->XE.Race.h1_ct_mbsegendEC) {
               VG_(pp_ExeContext)( xe->XE.Race.h1_ct_mbsegendEC );
            } else {
               emit( "  <auxwhat>(the end of the thread)</auxwhat>\n" );
            }
         }

      } else {

         /* ------ Text ------ */
         announce_combined_LockP_vecs( xe->XE.Race.locksHeldW,
                                       xe->XE.Race.h2_ct_locksHeldW );

         emit( "Possible data race during %s of size %d "
               "at %p by thread #%d\n",
               what, szB, (void*)err_ga, (Int)xe->XE.Race.thr->errmsg_index );

         tl_assert(xe->XE.Race.locksHeldW);
         show_LockP_summary_textmode( xe->XE.Race.locksHeldW, "" );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );

         if (xe->XE.Race.h2_ct) {
            tl_assert(xe->XE.Race.h2_ct_accEC); // assured by update_extra
            tl_assert(xe->XE.Race.h2_ct_locksHeldW);
            emit( "\n" );
            emit( "This conflicts with a previous %s of size %d "
                  "by thread #%d\n",
                  xe->XE.Race.h2_ct_accIsW ? "write" : "read",
                  xe->XE.Race.h2_ct_accSzB,
                  xe->XE.Race.h2_ct->errmsg_index );
            show_LockP_summary_textmode( xe->XE.Race.h2_ct_locksHeldW, "" );
            VG_(pp_ExeContext)( xe->XE.Race.h2_ct_accEC );
         }

         if (xe->XE.Race.h1_ct) {
            emit( " This conflicts with a previous access by thread #%d, "
                  "after\n",
                  xe->XE.Race.h1_ct->errmsg_index );
            if (xe->XE.Race.h1_ct_mbsegstartEC) {
               VG_(pp_ExeContext)( xe->XE.Race.h1_ct_mbsegstartEC );
            } else {
               emit( "   (the start of the thread)\n" );
            }
            emit( " but before\n" );
            if (xe->XE.Race.h1_ct_mbsegendEC) {
               VG_(pp_ExeContext)( xe->XE.Race.h1_ct_mbsegendEC );
            } else {
               emit( "   (the end of the thread)\n" );
            }
         }

      }
      VG_(pp_addrinfo) (err_ga, &xe->XE.Race.data_addrinfo);
      break; /* case XE_Race */
   } /* case XE_Race */

   default:
      tl_assert(0);
   } /* switch (VG_(get_error_kind)(err)) */
}

void HG_(print_access) (StackTrace ips, UInt n_ips,
                        Thr* thr_a,
                        Addr  ga,
                        SizeT SzB,
                        Bool  isW,
                        WordSetID locksHeldW )
{
   Thread* threadp;

   threadp = libhb_get_Thr_hgthread( thr_a );
   tl_assert(threadp);
   if (!threadp->announced) {
      /* This is for interactive use. We announce the thread if needed,
         but reset it to not announced afterwards, because we want
         the thread to be announced on the error output/log if needed. */
      announce_one_thread (threadp);
      threadp->announced = False;
   }

   VG_(printf) ("%s of size %d at %p by thread #%d",
                isW ? "write" : "read",
                (Int)SzB, (void*)ga, threadp->errmsg_index);
   if (threadp->coretid == VG_INVALID_THREADID)
      VG_(printf)(" tid (exited)\n");
   else
      VG_(printf)(" tid %u\n", threadp->coretid);
   {
      Lock** locksHeldW_P;
      locksHeldW_P = enumerate_WordSet_into_LockP_vector(
                       HG_(get_univ_lsets)(),
                       locksHeldW,
                       True/*allowed_to_be_invalid*/
                    );
      show_LockP_summary_textmode( locksHeldW_P, "" );
      HG_(free) (locksHeldW_P);
   }
   VG_(pp_StackTrace) (ips, n_ips);
   VG_(printf) ("\n");
}

const HChar* HG_(get_error_name) ( const Error* err )
{
   switch (VG_(get_error_kind)(err)) {
      case XE_Race:           return "Race";
      case XE_UnlockUnlocked: return "UnlockUnlocked";
      case XE_UnlockForeign:  return "UnlockForeign";
      case XE_UnlockBogus:    return "UnlockBogus";
      case XE_PthAPIerror:    return "PthAPIerror";
      case XE_LockOrder:      return "LockOrder";
      case XE_Misc:           return "Misc";
      default: tl_assert(0); /* fill in missing case */
   }
}

Bool HG_(recognised_suppression) ( const HChar* name, Supp *su )
{
#  define TRY(_name,_xskind)                   \
      if (0 == VG_(strcmp)(name, (_name))) {   \
         VG_(set_supp_kind)(su, (_xskind));    \
         return True;                          \
      }
   TRY("Race",           XS_Race);
   TRY("FreeMemLock",    XS_FreeMemLock);
   TRY("UnlockUnlocked", XS_UnlockUnlocked);
   TRY("UnlockForeign",  XS_UnlockForeign);
   TRY("UnlockBogus",    XS_UnlockBogus);
   TRY("PthAPIerror",    XS_PthAPIerror);
   TRY("LockOrder",      XS_LockOrder);
   TRY("Misc",           XS_Misc);
   return False;
#  undef TRY
}
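
/* For reference, a suppressions-file entry using one of these kinds
   looks roughly like this (the frame lines follow the usual core
   suppression syntax):

      {
         ignore-this-known-race
         Helgrind:Race
         fun:racy_function
      }
*/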

Bool HG_(read_extra_suppression_info) ( Int fd, HChar** bufpp, SizeT* nBufp,
                                        Int* lineno, Supp* su )
{
   /* do nothing -- no extra suppression info present.  Return True to
      indicate nothing bad happened. */
   return True;
}

Bool HG_(error_matches_suppression) ( const Error* err, const Supp* su )
{
   switch (VG_(get_supp_kind)(su)) {
   case XS_Race:           return VG_(get_error_kind)(err) == XE_Race;
   case XS_UnlockUnlocked: return VG_(get_error_kind)(err) == XE_UnlockUnlocked;
   case XS_UnlockForeign:  return VG_(get_error_kind)(err) == XE_UnlockForeign;
   case XS_UnlockBogus:    return VG_(get_error_kind)(err) == XE_UnlockBogus;
   case XS_PthAPIerror:    return VG_(get_error_kind)(err) == XE_PthAPIerror;
   case XS_LockOrder:      return VG_(get_error_kind)(err) == XE_LockOrder;
   case XS_Misc:           return VG_(get_error_kind)(err) == XE_Misc;
   //case XS_: return VG_(get_error_kind)(err) == XE_;
   default: tl_assert(0); /* fill in missing cases */
   }
}

SizeT HG_(get_extra_suppression_info) ( const Error* err,
                                       /*OUT*/HChar* buf, Int nBuf )
{
   tl_assert(nBuf >= 1);
   /* Do nothing */
   buf[0] = '\0';
   return 0;
}

SizeT HG_(print_extra_suppression_use) ( const Supp* su,
                                        /*OUT*/HChar* buf, Int nBuf )
{
   tl_assert(nBuf >= 1);
   /* Do nothing */
   buf[0] = '\0';
   return 0;
}

void HG_(update_extra_suppression_use) ( const Error* err, const Supp* su )
{
   /* Do nothing */
   return;
}


/*--------------------------------------------------------------------*/
/*--- end                                              hg_errors.c ---*/
/*--------------------------------------------------------------------*/